thanks to show ❤
This view is limited to 50 files because it contains too many changes.
- .config/.last_opt_in_prompt.yaml +1 -0
- .config/.last_survey_prompt.yaml +1 -0
- .config/.last_update_check.json +1 -0
- .config/active_config +1 -0
- .config/config_sentinel +0 -0
- .config/configurations/config_default +6 -0
- .config/gce +1 -0
- .config/logs/2023.06.12/13.34.47.039331.log +596 -0
- .config/logs/2023.06.12/13.35.12.270546.log +5 -0
- .config/logs/2023.06.12/13.35.35.936702.log +169 -0
- .config/logs/2023.06.12/13.35.43.661450.log +5 -0
- .config/logs/2023.06.12/13.36.08.323143.log +8 -0
- .config/logs/2023.06.12/13.36.09.026206.log +8 -0
- .gitattributes +57 -0
- SHOW/.gitignore +147 -0
- SHOW/.gitignore copy +201 -0
- SHOW/.vscode/PythonImportHelper-v2-Completion.json +0 -0
- SHOW/.vscode/launch.json +16 -0
- SHOW/EDIT_insightface/storage.py +60 -0
- SHOW/LICENSE +88 -0
- SHOW/README.md +260 -0
- SHOW/SHOW/__init__.py +25 -0
- SHOW/SHOW/constants.py +12 -0
- SHOW/SHOW/datasets/__init__.py +4 -0
- SHOW/SHOW/datasets/model_func_atach.py +57 -0
- SHOW/SHOW/datasets/op_base.py +168 -0
- SHOW/SHOW/datasets/op_dataset.py +732 -0
- SHOW/SHOW/datasets/op_post_process.py +212 -0
- SHOW/SHOW/datasets/pre_dataset.py +111 -0
- SHOW/SHOW/datasets/pre_runner.py +108 -0
- SHOW/SHOW/detector/__init__.py +2 -0
- SHOW/SHOW/detector/face_detector.py +106 -0
- SHOW/SHOW/detector/fan_detector.py +62 -0
- SHOW/SHOW/detector/pifpaf_detector.py +53 -0
- SHOW/SHOW/face_iders/__init__.py +11 -0
- SHOW/SHOW/face_iders/arcface_ider.py +122 -0
- SHOW/SHOW/face_iders/base.py +38 -0
- SHOW/SHOW/face_iders/builder.py +10 -0
- SHOW/SHOW/face_iders/utils.py +23 -0
- SHOW/SHOW/flame/FLAME.py +283 -0
- SHOW/SHOW/flame/__init__.py +2 -0
- SHOW/SHOW/flame/lbs.py +406 -0
- SHOW/SHOW/image.py +119 -0
- SHOW/SHOW/load_assets.py +243 -0
- SHOW/SHOW/load_models.py +72 -0
- SHOW/SHOW/loggers/MyNeptuneLogger.py +66 -0
- SHOW/SHOW/loggers/MyTFLogger.py +31 -0
- SHOW/SHOW/loggers/MyTextLogger.py +29 -0
- SHOW/SHOW/loggers/MyWandbLogger.py +59 -0
- SHOW/SHOW/loggers/__init__.py +6 -0
.config/.last_opt_in_prompt.yaml
ADDED
@@ -0,0 +1 @@
+{}
.config/.last_survey_prompt.yaml
ADDED
@@ -0,0 +1 @@
+last_prompt_time: 1686576935.2613058
.config/.last_update_check.json
ADDED
@@ -0,0 +1 @@
+{"last_update_check_time": 1686576943.101163, "last_update_check_revision": 20230602161805, "notifications": [], "last_nag_times": {}}
.config/active_config
ADDED
@@ -0,0 +1 @@
+default
.config/config_sentinel
ADDED
File without changes
.config/configurations/config_default
ADDED
@@ -0,0 +1,6 @@
+[component_manager]
+disable_update_check = true
+
+[compute]
+gce_metadata_read_timeout_sec = 0
+
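The two properties added above correspond to the gcloud config set calls recorded in the 13.36.08 and 13.36.09 logs later in this diff. As a minimal sketch (assuming the gcloud CLI is on PATH), the equivalent invocations would be:

    gcloud config set component_manager/disable_update_check true
    gcloud config set compute/gce_metadata_read_timeout_sec 0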
.config/gce
ADDED
@@ -0,0 +1 @@
+False
.config/logs/2023.06.12/13.34.47.039331.log
ADDED
@@ -0,0 +1,596 @@
+2023-06-12 13:34:47,043 DEBUG root Loaded Command Group: ['gcloud', 'components']
+2023-06-12 13:34:47,047 DEBUG root Loaded Command Group: ['gcloud', 'components', 'update']
+2023-06-12 13:34:47,050 DEBUG root Running [gcloud.components.update] with arguments: [--allow-no-backup: "True", --compile-python: "True", --quiet: "True", COMPONENT-IDS:7: "['core', 'gcloud-deps', 'bq', 'gcloud', 'gcloud-crc32c', 'gsutil', 'anthoscli']"]
+2023-06-12 13:34:47,050 INFO ___FILE_ONLY___ Beginning update. This process may take several minutes.
+2023-06-12 13:34:59,078 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2023-06-12 13:34:59,167 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components-2.json HTTP/1.1" 200 206160
+2023-06-12 13:34:59,186 INFO ___FILE_ONLY___ Your current Google Cloud CLI version is: 434.0.0
+2023-06-12 13:34:59,186 INFO ___FILE_ONLY___ Installing components from version: 434.0.0
+2023-06-12 13:34:59,187 DEBUG root Chosen display Format:table[box,title="These components will be removed."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+2023-06-12 13:34:59,188 DEBUG root Chosen display Format:table[box,title="These components will be updated."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+2023-06-12 13:34:59,189 DEBUG root Chosen display Format:table[box,title="These components will be installed."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
[box-drawn table "These components will be installed.", one cell fragment per log line — Name / Version / Size:
  BigQuery Command Line Tool                            2.0.93      1.6 MiB
  BigQuery Command Line Tool (Platform Specific)        2.0.77      < 1 MiB
  Bundled Python 3.9                                     3.9.16     63.5 MiB
  Cloud Storage Command Line Tool                        5.24       11.3 MiB
  Cloud Storage Command Line Tool (Platform Specific)    5.13       < 1 MiB
  Google Cloud CLI Core Libraries (Platform Specific)    2022.09.20 < 1 MiB
  Google Cloud CRC32C Hash Tool                          1.0.0      1.2 MiB
  anthoscli                                              0.2.36     68.4 MiB
  gcloud cli dependencies                                2021.04.16 < 1 MiB]
+2023-06-12 13:34:59,209 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2023-06-12 13:34:59,299 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/RELEASE_NOTES HTTP/1.1" 200 1011317
+2023-06-12 13:34:59,400 INFO ___FILE_ONLY___ For the latest full release notes, please visit:
+https://cloud.google.com/sdk/release_notes
+2023-06-12 13:34:59,403 INFO ___FILE_ONLY___ ╠═ Creating update staging area ═╣
[each stage below is framed by ╔ ╚ ╝ lines and followed by a progress bar: one ═ tick per log line]
+2023-06-12 13:35:02,112 INFO ___FILE_ONLY___ ╠═ Installing: BigQuery Command Line Tool ═╣
+2023-06-12 13:35:02,209 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-bq-20230519173220.tar.gz HTTP/1.1" 200 1681057
+2023-06-12 13:35:02,536 INFO ___FILE_ONLY___ ╠═ Installing: BigQuery Command Line Tool (Platform Spec... ═╣
+2023-06-12 13:35:02,627 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-bq-nix-20220920185015.tar.gz HTTP/1.1" 200 1837
+2023-06-12 13:35:02,637 INFO ___FILE_ONLY___ ╠═ Installing: Bundled Python 3.9 ═╣
+2023-06-12 13:35:02,673 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-bundled-python3-unix-linux-x86_64-20230428152336.tar.gz HTTP/1.1" 200 66615539
+2023-06-12 13:35:06,645 INFO ___FILE_ONLY___ ╠═ Installing: Cloud Storage Command Line Tool ═╣
+2023-06-12 13:35:06,738 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-gsutil-20230526134617.tar.gz HTTP/1.1" 200 11815391
+2023-06-12 13:35:08,401 INFO ___FILE_ONLY___ ╠═ Installing: Cloud Storage Command Line Tool (Platform... ═╣
+2023-06-12 13:35:08,487 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-gsutil-nix-20220920185015.tar.gz HTTP/1.1" 200 1851
+2023-06-12 13:35:08,498 INFO ___FILE_ONLY___ ╠═ Installing: Default set of gcloud commands ═╣
+2023-06-12 13:35:08,504 INFO ___FILE_ONLY___ ╠═ Installing: Google Cloud CLI Core Libraries (Platform... ═╣
+2023-06-12 13:35:08,590 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-core-nix-20220920185015.tar.gz HTTP/1.1" 200 2221
+2023-06-12 13:35:08,601 INFO ___FILE_ONLY___ ╠═ Installing: Google Cloud CRC32C Hash Tool ═╣
+2023-06-12 13:35:08,700 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-gcloud-crc32c-linux-x86_64-20230526134617.tar.gz HTTP/1.1" 200 1270213
+2023-06-12 13:35:08,806 INFO ___FILE_ONLY___ ╠═ Installing: anthoscli ═╣
+2023-06-12 13:35:08,906 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-anthoscli-linux-x86_64-20230417163046.tar.gz HTTP/1.1" 200 71766688
+2023-06-12 13:35:11,610 INFO ___FILE_ONLY___ ╠═ Installing: gcloud cli dependencies ═╣
+2023-06-12 13:35:11,704 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-gcloud-deps-linux-x86_64-20210416153011.tar.gz HTTP/1.1" 200 104
+2023-06-12 13:35:11,714 INFO ___FILE_ONLY___ ╠═ Creating backup and activating new installation ═╣
+2023-06-12 13:35:11,714 DEBUG root Attempting to move directory [/tools/google-cloud-sdk] to [/tools/google-cloud-sdk.staging/.install/.backup]
+2023-06-12 13:35:11,714 DEBUG root Attempting to move directory [/tools/google-cloud-sdk.staging] to [/tools/google-cloud-sdk]
+2023-06-12 13:35:11,718 DEBUG root Updating notification cache...
+2023-06-12 13:35:11,720 INFO ___FILE_ONLY___ Performing post processing steps...
+2023-06-12 13:35:11,721 DEBUG root Executing command: ['python3', '-S', '/tools/google-cloud-sdk/lib/gcloud.py', 'components', 'post-process']
+2023-06-12 13:35:35,256 INFO ___FILE_ONLY___ Update done!
+2023-06-12 13:35:35,260 DEBUG root Chosen display Format:none
+2023-06-12 13:35:35,260 INFO root Display format: "none"
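For context, the arguments logged at the top of this file describe a quiet component update; a rough sketch of an invocation that produces this kind of log (component IDs copied from the log itself, gcloud assumed to be on PATH) would be:

    gcloud components update --quiet core gcloud-deps bq gcloud gcloud-crc32c gsutil anthoscli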
.config/logs/2023.06.12/13.35.12.270546.log
ADDED
@@ -0,0 +1,5 @@
+2023-06-12 13:35:12,271 DEBUG root Loaded Command Group: ['gcloud', 'components']
+2023-06-12 13:35:12,273 DEBUG root Loaded Command Group: ['gcloud', 'components', 'post_process']
+2023-06-12 13:35:12,275 DEBUG root Running [gcloud.components.post-process] with arguments: []
+2023-06-12 13:35:35,133 DEBUG root Chosen display Format:none
+2023-06-12 13:35:35,133 INFO root Display format: "none"
.config/logs/2023.06.12/13.35.35.936702.log
ADDED
@@ -0,0 +1,169 @@
+2023-06-12 13:35:35,937 DEBUG root Loaded Command Group: ['gcloud', 'components']
+2023-06-12 13:35:35,940 DEBUG root Loaded Command Group: ['gcloud', 'components', 'update']
+2023-06-12 13:35:35,942 DEBUG root Running [gcloud.components.update] with arguments: [--quiet: "True", COMPONENT-IDS:8: "['gcloud', 'core', 'bq', 'gsutil', 'compute', 'preview', 'alpha', 'beta']"]
+2023-06-12 13:35:35,943 INFO ___FILE_ONLY___ Beginning update. This process may take several minutes.
+2023-06-12 13:35:35,947 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2023-06-12 13:35:36,034 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components-2.json HTTP/1.1" 200 206160
+2023-06-12 13:35:36,055 WARNING root Component [compute] no longer exists.
+2023-06-12 13:35:36,055 WARNING root Component [preview] no longer exists.
+2023-06-12 13:35:36,057 INFO ___FILE_ONLY___ Your current Google Cloud CLI version is: 434.0.0
+2023-06-12 13:35:36,057 INFO ___FILE_ONLY___ Installing components from version: 434.0.0
+2023-06-12 13:35:36,057 DEBUG root Chosen display Format:table[box,title="These components will be removed."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+2023-06-12 13:35:36,058 DEBUG root Chosen display Format:table[box,title="These components will be updated."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+2023-06-12 13:35:36,059 DEBUG root Chosen display Format:table[box,title="These components will be installed."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
[box-drawn table "These components will be installed.", one cell fragment per log line — Name / Version / Size:
  gcloud Alpha Commands    2023.06.02   < 1 MiB
  gcloud Beta Commands     2023.06.02   < 1 MiB]
+2023-06-12 13:35:36,066 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2023-06-12 13:35:36,158 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/RELEASE_NOTES HTTP/1.1" 200 1011317
+2023-06-12 13:35:36,260 INFO ___FILE_ONLY___ For the latest full release notes, please visit:
+https://cloud.google.com/sdk/release_notes
+2023-06-12 13:35:36,263 INFO ___FILE_ONLY___ ╠═ Creating update staging area ═╣
[each stage below is framed by ╔ ╚ ╝ lines and followed by a progress bar: one ═ tick per log line]
+2023-06-12 13:35:42,900 INFO ___FILE_ONLY___ ╠═ Installing: gcloud Alpha Commands ═╣
+2023-06-12 13:35:42,988 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-alpha-20230602161805.tar.gz HTTP/1.1" 200 800
+2023-06-12 13:35:42,999 INFO ___FILE_ONLY___ ╠═ Installing: gcloud Beta Commands ═╣
+2023-06-12 13:35:43,086 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-beta-20230602161805.tar.gz HTTP/1.1" 200 797
+2023-06-12 13:35:43,096 INFO ___FILE_ONLY___ ╠═ Creating backup and activating new installation ═╣
+2023-06-12 13:35:43,096 DEBUG root Attempting to move directory [/tools/google-cloud-sdk] to [/tools/google-cloud-sdk.staging/.install/.backup]
+2023-06-12 13:35:43,096 DEBUG root Attempting to move directory [/tools/google-cloud-sdk.staging] to [/tools/google-cloud-sdk]
+2023-06-12 13:35:43,101 DEBUG root Updating notification cache...
+2023-06-12 13:35:43,103 INFO ___FILE_ONLY___ Performing post processing steps...
+2023-06-12 13:35:43,103 DEBUG root Executing command: ['python3', '-S', '/tools/google-cloud-sdk/lib/gcloud.py', 'components', 'post-process']
+2023-06-12 13:36:07,643 INFO ___FILE_ONLY___ Update done!
+2023-06-12 13:36:07,647 DEBUG root Chosen display Format:none
+2023-06-12 13:36:07,647 INFO root Display format: "none"
.config/logs/2023.06.12/13.35.43.661450.log
ADDED
@@ -0,0 +1,5 @@
+2023-06-12 13:35:43,662 DEBUG root Loaded Command Group: ['gcloud', 'components']
+2023-06-12 13:35:43,664 DEBUG root Loaded Command Group: ['gcloud', 'components', 'post_process']
+2023-06-12 13:35:43,666 DEBUG root Running [gcloud.components.post-process] with arguments: []
+2023-06-12 13:36:07,378 DEBUG root Chosen display Format:none
+2023-06-12 13:36:07,378 INFO root Display format: "none"
.config/logs/2023.06.12/13.36.08.323143.log
ADDED
@@ -0,0 +1,8 @@
+2023-06-12 13:36:08,325 DEBUG root Loaded Command Group: ['gcloud', 'config']
+2023-06-12 13:36:08,353 DEBUG root Loaded Command Group: ['gcloud', 'config', 'set']
+2023-06-12 13:36:08,356 DEBUG root Running [gcloud.config.set] with arguments: [SECTION/PROPERTY: "component_manager/disable_update_check", VALUE: "true"]
+2023-06-12 13:36:08,357 INFO ___FILE_ONLY___ Updated property [component_manager/disable_update_check].
+
+2023-06-12 13:36:08,358 DEBUG root Chosen display Format:default
+2023-06-12 13:36:08,359 INFO root Display format: "default"
+2023-06-12 13:36:08,359 DEBUG root SDK update checks are disabled.
.config/logs/2023.06.12/13.36.09.026206.log
ADDED
@@ -0,0 +1,8 @@
+2023-06-12 13:36:09,028 DEBUG root Loaded Command Group: ['gcloud', 'config']
+2023-06-12 13:36:09,054 DEBUG root Loaded Command Group: ['gcloud', 'config', 'set']
+2023-06-12 13:36:09,057 DEBUG root Running [gcloud.config.set] with arguments: [SECTION/PROPERTY: "compute/gce_metadata_read_timeout_sec", VALUE: "0"]
+2023-06-12 13:36:09,058 INFO ___FILE_ONLY___ Updated property [compute/gce_metadata_read_timeout_sec].
+
+2023-06-12 13:36:09,058 DEBUG root Chosen display Format:default
+2023-06-12 13:36:09,059 INFO root Display format: "default"
+2023-06-12 13:36:09,060 DEBUG root SDK update checks are disabled.
.gitattributes
CHANGED
@@ -32,3 +32,60 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
32 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
32 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
35 |
+
SHOW/doc/images/rec_results_detial.png filter=lfs diff=lfs merge=lfs -text
|
36 |
+
SHOW/doc/show.gif filter=lfs diff=lfs merge=lfs -text
|
37 |
+
SHOW/modules/DECA/Doc/images/soubhik.gif filter=lfs diff=lfs merge=lfs -text
|
38 |
+
SHOW/modules/MICA/documents/D3DFACS.gif filter=lfs diff=lfs merge=lfs -text
|
39 |
+
SHOW/modules/MICA/documents/FACEWAREHOUSE.gif filter=lfs diff=lfs merge=lfs -text
|
40 |
+
SHOW/modules/MICA/documents/FLORENCE.gif filter=lfs diff=lfs merge=lfs -text
|
41 |
+
SHOW/modules/MICA/documents/FRGC.gif filter=lfs diff=lfs merge=lfs -text
|
42 |
+
SHOW/modules/MICA/documents/LYHM.gif filter=lfs diff=lfs merge=lfs -text
|
43 |
+
SHOW/modules/MICA/documents/STIRLING.gif filter=lfs diff=lfs merge=lfs -text
|
44 |
+
SHOW/modules/MICA/documents/voxceleb.gif filter=lfs diff=lfs merge=lfs -text
|
45 |
+
data/uv_mask.jpg filter=lfs diff=lfs merge=lfs -text
|
46 |
+
data/uv_mask_eyes.jpg filter=lfs diff=lfs merge=lfs -text
|
47 |
+
mmdetection-2.26.0/configs/reppoints/reppoints.png filter=lfs diff=lfs merge=lfs -text
|
48 |
+
mmdetection-2.26.0/demo/MMDet_InstanceSeg_Tutorial.ipynb filter=lfs diff=lfs merge=lfs -text
|
49 |
+
mmdetection-2.26.0/demo/MMDet_Tutorial.ipynb filter=lfs diff=lfs merge=lfs -text
|
50 |
+
mmdetection-2.26.0/dist/mmdet-2.26.0-py3.10.egg filter=lfs diff=lfs merge=lfs -text
|
51 |
+
mmdetection-2.26.0/resources/corruptions_sev_3.png filter=lfs diff=lfs merge=lfs -text
|
52 |
+
mmpose-0.29.0/demo/resources/demo_coco.gif filter=lfs diff=lfs merge=lfs -text
|
53 |
+
mmpose-0.29.0/dist/mmpose-0.29.0-py3.10.egg filter=lfs diff=lfs merge=lfs -text
|
54 |
+
models/models_MICA/FLAME2020/FLAME_masks/FLAME_masks.gif filter=lfs diff=lfs merge=lfs -text
|
55 |
+
models/models_MICA/pretrained/mica.tar filter=lfs diff=lfs merge=lfs -text
|
56 |
+
models/models_deca/data/deca_model.tar filter=lfs diff=lfs merge=lfs -text
|
57 |
+
models/models_pixie/data/pixie_model.tar filter=lfs diff=lfs merge=lfs -text
|
58 |
+
models/models_pixie/data/smplx_tex.png filter=lfs diff=lfs merge=lfs -text
|
59 |
+
models/pymaf_data/UV_data/UV_symmetry_transforms.mat filter=lfs diff=lfs merge=lfs -text
|
60 |
+
openpose/.github/media/body_heat_maps.png filter=lfs diff=lfs merge=lfs -text
|
61 |
+
openpose/.github/media/dance_foot.gif filter=lfs diff=lfs merge=lfs -text
|
62 |
+
openpose/.github/media/paf_heat_maps.png filter=lfs diff=lfs merge=lfs -text
|
63 |
+
openpose/.github/media/pose_face.gif filter=lfs diff=lfs merge=lfs -text
|
64 |
+
openpose/.github/media/pose_face_hands.gif filter=lfs diff=lfs merge=lfs -text
|
65 |
+
openpose/.github/media/pose_hands.gif filter=lfs diff=lfs merge=lfs -text
|
66 |
+
openpose/.github/media/shake.gif filter=lfs diff=lfs merge=lfs -text
|
67 |
+
openpose/3rdparty/windows/wget/wget.exe filter=lfs diff=lfs merge=lfs -text
|
68 |
+
openpose/3rdparty/windows/wget/wget.exe.debug filter=lfs diff=lfs merge=lfs -text
|
69 |
+
openpose/build/caffe/lib/libcaffe.so filter=lfs diff=lfs merge=lfs -text
|
70 |
+
openpose/build/caffe/lib/libcaffe.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
|
71 |
+
openpose/build/caffe/lib/libcaffeproto.a filter=lfs diff=lfs merge=lfs -text
|
72 |
+
openpose/build/caffe/src/openpose_lib-build/lib/libcaffe.so filter=lfs diff=lfs merge=lfs -text
|
73 |
+
openpose/build/caffe/src/openpose_lib-build/lib/libcaffe.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
|
74 |
+
openpose/build/caffe/src/openpose_lib-build/lib/libcaffeproto.a filter=lfs diff=lfs merge=lfs -text
|
75 |
+
openpose/build/caffe/src/openpose_lib-build/src/caffe/CMakeFiles/caffeproto.dir/__/__/include/caffe/proto/caffe.pb.cc.o filter=lfs diff=lfs merge=lfs -text
|
76 |
+
openpose/build/examples/deprecated/CMakeFiles/tutorial_add_module_custom_post_processing.bin.dir/tutorial_add_module_custom_post_processing.cpp.o filter=lfs diff=lfs merge=lfs -text
|
77 |
+
openpose/build/examples/tutorial_api_cpp/CMakeFiles/13_asynchronous_custom_input_output_and_datum.bin.dir/13_asynchronous_custom_input_output_and_datum.cpp.o filter=lfs diff=lfs merge=lfs -text
|
78 |
+
openpose/build/examples/tutorial_api_cpp/CMakeFiles/18_synchronous_custom_all_and_datum.bin.dir/18_synchronous_custom_all_and_datum.cpp.o filter=lfs diff=lfs merge=lfs -text
|
79 |
+
openpose/build/src/openpose/CMakeFiles/openpose.dir/core/array.cpp.o filter=lfs diff=lfs merge=lfs -text
|
80 |
+
openpose/build/src/openpose/core/CMakeFiles/openpose_core.dir/array.cpp.o filter=lfs diff=lfs merge=lfs -text
|
81 |
+
openpose/build/src/openpose/core/libopenpose_core.so filter=lfs diff=lfs merge=lfs -text
|
82 |
+
openpose/build/src/openpose/face/libopenpose_face.so filter=lfs diff=lfs merge=lfs -text
|
83 |
+
openpose/build/src/openpose/hand/libopenpose_hand.so filter=lfs diff=lfs merge=lfs -text
|
84 |
+
openpose/build/src/openpose/libopenpose.so filter=lfs diff=lfs merge=lfs -text
|
85 |
+
openpose/build/src/openpose/libopenpose.so.1.7.0 filter=lfs diff=lfs merge=lfs -text
|
86 |
+
openpose/build/src/openpose/net/libopenpose_net.so filter=lfs diff=lfs merge=lfs -text
|
87 |
+
openpose/build/src/openpose/pose/libopenpose_pose.so filter=lfs diff=lfs merge=lfs -text
|
88 |
+
openpose/examples/media/video.avi filter=lfs diff=lfs merge=lfs -text
|
89 |
+
openpose/models/face/pose_iter_116000.caffemodel filter=lfs diff=lfs merge=lfs -text
|
90 |
+
openpose/models/hand/pose_iter_102000.caffemodel filter=lfs diff=lfs merge=lfs -text
|
91 |
+
openpose/models/pose/body_25/pose_iter_584000.caffemodel filter=lfs diff=lfs merge=lfs -text
|
SHOW/.gitignore
ADDED
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
1 |
+
*__pycache__*
|
2 |
+
__pycache__/
|
3 |
+
.neptune/
|
4 |
+
.ipynb_checkpoints/
|
5 |
+
.idea/
|
6 |
+
.history/
|
7 |
+
*.pyc
|
8 |
+
__pycache__
|
9 |
+
build
|
10 |
+
.idea
|
11 |
+
|
12 |
+
# Byte-compiled / optimized / DLL files
|
13 |
+
__pycache__/
|
14 |
+
*.py[cod]
|
15 |
+
*$py.class
|
16 |
+
|
17 |
+
# C extensions
|
18 |
+
*.so
|
19 |
+
|
20 |
+
# Distribution / packaging
|
21 |
+
.Python
|
22 |
+
build/
|
23 |
+
develop-eggs/
|
24 |
+
dist/
|
25 |
+
downloads/
|
26 |
+
eggs/
|
27 |
+
.eggs/
|
28 |
+
lib/
|
29 |
+
lib64/
|
30 |
+
parts/
|
31 |
+
sdist/
|
32 |
+
var/
|
33 |
+
wheels/
|
34 |
+
pip-wheel-metadata/
|
35 |
+
share/python-wheels/
|
36 |
+
*.egg-info/
|
37 |
+
.installed.cfg
|
38 |
+
*.egg
|
39 |
+
MANIFEST
|
40 |
+
|
41 |
+
# PyInstaller
|
42 |
+
# Usually these files are written by a python script from a template
|
43 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
44 |
+
*.manifest
|
45 |
+
*.spec
|
46 |
+
|
47 |
+
# Installer logs
|
48 |
+
pip-log.txt
|
49 |
+
pip-delete-this-directory.txt
|
50 |
+
|
51 |
+
# Unit test / coverage reports
|
52 |
+
htmlcov/
|
53 |
+
.tox/
|
54 |
+
.nox/
|
55 |
+
.coverage
|
56 |
+
.coverage.*
|
57 |
+
.cache
|
58 |
+
nosetests.xml
|
59 |
+
coverage.xml
|
60 |
+
*.cover
|
61 |
+
*.py,cover
|
62 |
+
.hypothesis/
|
63 |
+
.pytest_cache/
|
64 |
+
|
65 |
+
# Translations
|
66 |
+
*.mo
|
67 |
+
*.pot
|
68 |
+
|
69 |
+
# Django stuff:
|
70 |
+
*.log
|
71 |
+
local_settings.py
|
72 |
+
db.sqlite3
|
73 |
+
db.sqlite3-journal
|
74 |
+
|
75 |
+
# Flask stuff:
|
76 |
+
instance/
|
77 |
+
.webassets-cache
|
78 |
+
|
79 |
+
# Scrapy stuff:
|
80 |
+
.scrapy
|
81 |
+
|
82 |
+
# Sphinx documentation
|
83 |
+
docs/_build/
|
84 |
+
|
85 |
+
# PyBuilder
|
86 |
+
target/
|
87 |
+
|
88 |
+
# Jupyter Notebook
|
89 |
+
.ipynb_checkpoints
|
90 |
+
|
91 |
+
# IPython
|
92 |
+
profile_default/
|
93 |
+
ipython_config.py
|
94 |
+
|
95 |
+
# pyenv
|
96 |
+
.python-version
|
97 |
+
|
98 |
+
# pipenv
|
99 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
100 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
101 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
102 |
+
# install all needed dependencies.
|
103 |
+
#Pipfile.lock
|
104 |
+
|
105 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
106 |
+
__pypackages__/
|
107 |
+
|
108 |
+
# Celery stuff
|
109 |
+
celerybeat-schedule
|
110 |
+
celerybeat.pid
|
111 |
+
|
112 |
+
# SageMath parsed files
|
113 |
+
*.sage.py
|
114 |
+
|
115 |
+
# Environments
|
116 |
+
.env
|
117 |
+
.venv
|
118 |
+
env/
|
119 |
+
venv/
|
120 |
+
ENV/
|
121 |
+
env.bak/
|
122 |
+
venv.bak/
|
123 |
+
|
124 |
+
# Spyder project settings
|
125 |
+
.spyderproject
|
126 |
+
.spyproject
|
127 |
+
|
128 |
+
# Rope project settings
|
129 |
+
.ropeproject
|
130 |
+
|
131 |
+
# mkdocs documentation
|
132 |
+
/site
|
133 |
+
|
134 |
+
# mypy
|
135 |
+
.mypy_cache/
|
136 |
+
.dmypy.json
|
137 |
+
dmypy.json
|
138 |
+
|
139 |
+
# Pyre type checker
|
140 |
+
.pyre/
|
141 |
+
transformations/nerf
|
142 |
+
models/*
|
143 |
+
|
144 |
+
modules/MICA/demo
|
145 |
+
|
146 |
+
.vscode/
|
147 |
+
.vscode/PythonImportHelper-v2-Completion.json
|
SHOW/.gitignore copy
ADDED
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
1 |
+
body_templates
|
2 |
+
cfg_files_templates
|
3 |
+
*__pycache__*
|
4 |
+
__pycache__/
|
5 |
+
out/
|
6 |
+
videos/
|
7 |
+
results/
|
8 |
+
wandb/
|
9 |
+
draft/
|
10 |
+
Gestures/
|
11 |
+
Gestures_extras/
|
12 |
+
output/
|
13 |
+
.neptune/
|
14 |
+
|
15 |
+
.ipynb_checkpoints/
|
16 |
+
.idea/
|
17 |
+
.history/
|
18 |
+
crop_video_out/
|
19 |
+
pixie-env
|
20 |
+
eval-script
|
21 |
+
results
|
22 |
+
data/*
|
23 |
+
|
24 |
+
Testsamples/*/results
|
25 |
+
logs/
|
26 |
+
*.pyc
|
27 |
+
__pycache__
|
28 |
+
build
|
29 |
+
|
30 |
+
|
31 |
+
*.jpg
|
32 |
+
*.png
|
33 |
+
*.webm
|
34 |
+
*.avi
|
35 |
+
*.html
|
36 |
+
*.pyc
|
37 |
+
*.o
|
38 |
+
*.so
|
39 |
+
*.obj
|
40 |
+
*.npy
|
41 |
+
*.npz
|
42 |
+
*.pkl
|
43 |
+
*.zip
|
44 |
+
*.h5
|
45 |
+
*.json
|
46 |
+
|
47 |
+
test_results
|
48 |
+
input
|
49 |
+
images
|
50 |
+
FFHQ
|
51 |
+
FFHQ_seg
|
52 |
+
|
53 |
+
configs/logs
|
54 |
+
configs/jobs
|
55 |
+
slogs
|
56 |
+
|
57 |
+
.idea
|
58 |
+
|
59 |
+
# Byte-compiled / optimized / DLL files
|
60 |
+
__pycache__/
|
61 |
+
*.py[cod]
|
62 |
+
*$py.class
|
63 |
+
|
64 |
+
# C extensions
|
65 |
+
*.so
|
66 |
+
|
67 |
+
# Distribution / packaging
|
68 |
+
.Python
|
69 |
+
build/
|
70 |
+
develop-eggs/
|
71 |
+
dist/
|
72 |
+
downloads/
|
73 |
+
eggs/
|
74 |
+
.eggs/
|
75 |
+
lib/
|
76 |
+
lib64/
|
77 |
+
parts/
|
78 |
+
sdist/
|
79 |
+
var/
|
80 |
+
wheels/
|
81 |
+
pip-wheel-metadata/
|
82 |
+
share/python-wheels/
|
83 |
+
*.egg-info/
|
84 |
+
.installed.cfg
|
85 |
+
*.egg
|
86 |
+
MANIFEST
|
87 |
+
|
88 |
+
# PyInstaller
|
89 |
+
# Usually these files are written by a python script from a template
|
90 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
91 |
+
*.manifest
|
92 |
+
*.spec
|
93 |
+
|
94 |
+
# Installer logs
|
95 |
+
pip-log.txt
|
96 |
+
pip-delete-this-directory.txt
|
97 |
+
|
98 |
+
# Unit test / coverage reports
|
99 |
+
htmlcov/
|
100 |
+
.tox/
|
101 |
+
.nox/
|
102 |
+
.coverage
|
103 |
+
.coverage.*
|
104 |
+
.cache
|
105 |
+
nosetests.xml
|
106 |
+
coverage.xml
|
107 |
+
*.cover
|
108 |
+
*.py,cover
|
109 |
+
.hypothesis/
|
110 |
+
.pytest_cache/
|
111 |
+
|
112 |
+
# Translations
|
113 |
+
*.mo
|
114 |
+
*.pot
|
115 |
+
|
116 |
+
# Django stuff:
|
117 |
+
*.log
|
118 |
+
local_settings.py
|
119 |
+
db.sqlite3
|
120 |
+
db.sqlite3-journal
|
121 |
+
|
122 |
+
# Flask stuff:
|
123 |
+
instance/
|
124 |
+
.webassets-cache
|
125 |
+
|
126 |
+
# Scrapy stuff:
|
127 |
+
.scrapy
|
128 |
+
|
129 |
+
# Sphinx documentation
|
130 |
+
docs/_build/
|
131 |
+
|
132 |
+
# PyBuilder
|
133 |
+
target/
|
134 |
+
|
135 |
+
# Jupyter Notebook
|
136 |
+
.ipynb_checkpoints
|
137 |
+
|
138 |
+
# IPython
|
139 |
+
profile_default/
|
140 |
+
ipython_config.py
|
141 |
+
|
142 |
+
# pyenv
|
143 |
+
.python-version
|
144 |
+
|
145 |
+
# pipenv
|
146 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
147 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
148 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
149 |
+
# install all needed dependencies.
|
150 |
+
#Pipfile.lock
|
151 |
+
|
152 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
153 |
+
__pypackages__/
|
154 |
+
|
155 |
+
# Celery stuff
|
156 |
+
celerybeat-schedule
|
157 |
+
celerybeat.pid
|
158 |
+
|
159 |
+
# SageMath parsed files
|
160 |
+
*.sage.py
|
161 |
+
|
162 |
+
# Environments
|
163 |
+
.env
|
164 |
+
.venv
|
165 |
+
env/
|
166 |
+
venv/
|
167 |
+
ENV/
|
168 |
+
env.bak/
|
169 |
+
venv.bak/
|
170 |
+
|
171 |
+
# Spyder project settings
|
172 |
+
.spyderproject
|
173 |
+
.spyproject
|
174 |
+
|
175 |
+
# Rope project settings
|
176 |
+
.ropeproject
|
177 |
+
|
178 |
+
# mkdocs documentation
|
179 |
+
/site
|
180 |
+
|
181 |
+
# mypy
|
182 |
+
.mypy_cache/
|
183 |
+
.dmypy.json
|
184 |
+
dmypy.json
|
185 |
+
|
186 |
+
# Pyre type checker
|
187 |
+
.pyre/
|
188 |
+
transformations/nerf
|
189 |
+
models/*
|
190 |
+
|
191 |
+
test/demo_video
|
192 |
+
|
193 |
+
!test/demo_video/half.mp4
|
194 |
+
|
195 |
+
!models/*.py
|
196 |
+
!data/*.png
|
197 |
+
!data/id_pic
|
198 |
+
!test/*.py
|
199 |
+
|
200 |
+
!doc/*.jpg
|
201 |
+
!doc/*.png
|
SHOW/.vscode/PythonImportHelper-v2-Completion.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
SHOW/.vscode/launch.json
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
// Use IntelliSense to learn about possible attributes.
|
3 |
+
// Hover to view descriptions of existing attributes.
|
4 |
+
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
5 |
+
"version": "0.2.0",
|
6 |
+
"configurations": [
|
7 |
+
{
|
8 |
+
"name": "Python: Current File",
|
9 |
+
"type": "python",
|
10 |
+
"request": "launch",
|
11 |
+
"program": "${file}",
|
12 |
+
"console": "integratedTerminal",
|
13 |
+
"justMyCode": true
|
14 |
+
}
|
15 |
+
]
|
16 |
+
}
|
SHOW/EDIT_insightface/storage.py
ADDED
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
1 |
+
|
2 |
+
import os
|
3 |
+
import os.path as osp
|
4 |
+
import zipfile
|
5 |
+
from .download import download_file
|
6 |
+
|
7 |
+
#BASE_REPO_URL='http://storage.insightface.ai/files'
|
8 |
+
#BASE_REPO_URL='http://insightface.cn-sh2.ufileos.com'
|
9 |
+
BASE_REPO_URL='http://d1gsb2o3ihr2l5.cloudfront.net'
|
10 |
+
|
11 |
+
def download(sub_dir, name, force=False, root='~/.insightface'):
|
12 |
+
_root = os.path.expanduser(root)
|
13 |
+
dir_path = os.path.join(_root, sub_dir, name)
|
14 |
+
if osp.exists(dir_path) and not force:
|
15 |
+
return dir_path
|
16 |
+
print('download_path:', dir_path)
|
17 |
+
zip_file_path = os.path.join(_root, sub_dir, name + '.zip')
|
18 |
+
|
19 |
+
print("zip_file_path", zip_file_path)
|
20 |
+
|
21 |
+
model_url = "%s/%s/%s.zip"%(BASE_REPO_URL, sub_dir, name)
|
22 |
+
#model_url = "%s/%s.zip"%(BASE_REPO_URL, name)
|
23 |
+
|
24 |
+
if not os.path.exists(zip_file_path):
|
25 |
+
download_file(model_url,
|
26 |
+
path=zip_file_path,
|
27 |
+
overwrite=True)
|
28 |
+
if not os.path.exists(dir_path):
|
29 |
+
os.makedirs(dir_path)
|
30 |
+
with zipfile.ZipFile(zip_file_path) as zf:
|
31 |
+
zf.extractall(dir_path)
|
32 |
+
#os.remove(zip_file_path)
|
33 |
+
return dir_path
|
34 |
+
|
35 |
+
def ensure_available(sub_dir, name, root='~/.insightface'):
|
36 |
+
return download(sub_dir, name, force=False, root=root)
|
37 |
+
|
38 |
+
def download_onnx(sub_dir, model_file, force=False, root='~/.insightface', download_zip=False):
|
39 |
+
_root = os.path.expanduser(root)
|
40 |
+
model_root = osp.join(_root, sub_dir)
|
41 |
+
new_model_file = osp.join(model_root, model_file)
|
42 |
+
if osp.exists(new_model_file) and not force:
|
43 |
+
return new_model_file
|
44 |
+
if not osp.exists(model_root):
|
45 |
+
os.makedirs(model_root)
|
46 |
+
print('download_path:', new_model_file)
|
47 |
+
if not download_zip:
|
48 |
+
model_url = "%s/%s/%s"%(BASE_REPO_URL, sub_dir, model_file)
|
49 |
+
download_file(model_url,
|
50 |
+
path=new_model_file,
|
51 |
+
overwrite=True)
|
52 |
+
else:
|
53 |
+
model_url = "%s/%s/%s.zip"%(BASE_REPO_URL, sub_dir, model_file)
|
54 |
+
zip_file_path = new_model_file+".zip"
|
55 |
+
download_file(model_url,
|
56 |
+
path=zip_file_path,
|
57 |
+
overwrite=True)
|
58 |
+
with zipfile.ZipFile(zip_file_path) as zf:
|
59 |
+
zf.extractall(model_root)
|
60 |
+
return new_model_file
|
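This file mirrors the `insightface` storage helper, pointing it at a working mirror URL. Below is a minimal usage sketch (assumptions: the file is intended as a drop-in replacement for the installed `insightface.utils.storage` module, and the model name follows the README's insightface section):

```python
# Sketch only: assumes the patched storage.py has been copied over the installed
# insightface.utils.storage module, so the mirror BASE_REPO_URL above is used.
from insightface.utils.storage import ensure_available

# Downloads <BASE_REPO_URL>/models/buffalo_l.zip on first use and extracts it to
# ~/.insightface/models/buffalo_l, returning that directory.
model_dir = ensure_available('models', 'buffalo_l')
print('buffalo_l extracted to:', model_dir)
```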
SHOW/LICENSE
ADDED
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
1 |
+
License
|
2 |
+
|
3 |
+
Software Copyright License for non-commercial scientific research purposes
|
4 |
+
Please read carefully the following terms and conditions and any accompanying documentation before you download
|
5 |
+
and/or use the DECA model, data and software, (the "Model & Software"), including 3D meshes, software, and scripts.
|
6 |
+
By downloading and/or using the Model & Software (including downloading, cloning, installing, and any other use
|
7 |
+
of this github repository), you acknowledge that you have read these terms and conditions, understand them, and
|
8 |
+
agree to be bound by them. If you do not agree with these terms and conditions, you must not download and/or use
|
9 |
+
the Model & Software. Any infringement of the terms of this agreement will automatically terminate your rights
|
10 |
+
under this License
|
11 |
+
|
12 |
+
Ownership / Licensees
|
13 |
+
The Model & Software and the associated materials has been developed at the
|
14 |
+
Max Planck Institute for Intelligent Systems (hereinafter "MPI").
|
15 |
+
|
16 |
+
Any copyright or patent right is owned by and proprietary material of the
|
17 |
+
Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (hereinafter “MPG”; MPI and MPG hereinafter
|
18 |
+
collectively “Max-Planck”) hereinafter the “Licensor”.
|
19 |
+
|
20 |
+
License Grant
|
21 |
+
Licensor grants you (Licensee) personally a single-user, non-exclusive, non-transferable, free of charge right:
|
22 |
+
|
23 |
+
• To install the Model & Software on computers owned, leased or otherwise controlled by you and/or your organization.
|
24 |
+
• To use the Model & Software for the sole purpose of performing peaceful non-commercial scientific research,
|
25 |
+
non-commercial education, or non-commercial artistic projects.
|
26 |
+
|
27 |
+
Any other use, in particular any use for commercial, pornographic, military, or surveillance purposes is prohibited.
|
28 |
+
This includes, without limitation, incorporation in a commercial product, use in a commercial service,
|
29 |
+
or production of other artefacts for commercial purposes.
|
30 |
+
|
31 |
+
The Model & Software may not be used to create fake, libelous, misleading, or defamatory content of any kind, excluding
|
32 |
+
analyses in peer-reviewed scientific research.
|
33 |
+
|
34 |
+
The Model & Software may not be reproduced, modified and/or made available in any form to any third party
|
35 |
+
without Max-Planck’s prior written permission.
|
36 |
+
|
37 |
+
The Model & Software may not be used for pornographic purposes or to generate pornographic material whether
|
38 |
+
commercial or not. This license also prohibits the use of the Model & Software to train methods/algorithms/neural
|
39 |
+
networks/etc. for commercial use of any kind. By downloading the Model & Software, you agree not to reverse engineer it.
|
40 |
+
|
41 |
+
No Distribution
|
42 |
+
The Model & Software and the license herein granted shall not be copied, shared, distributed, re-sold, offered
|
43 |
+
for re-sale, transferred or sub-licensed in whole or in part except that you may make one copy for archive
|
44 |
+
purposes only.
|
45 |
+
|
46 |
+
Disclaimer of Representations and Warranties
|
47 |
+
You expressly acknowledge and agree that the Model & Software results from basic research, is provided “AS IS”,
|
48 |
+
may contain errors, and that any use of the Model & Software is at your sole risk.
|
49 |
+
LICENSOR MAKES NO REPRESENTATIONS
|
50 |
+
OR WARRANTIES OF ANY KIND CONCERNING THE MODEL & SOFTWARE, NEITHER EXPRESS NOR IMPLIED, AND THE ABSENCE OF ANY
|
51 |
+
LEGAL OR ACTUAL DEFECTS, WHETHER DISCOVERABLE OR NOT. Specifically, and not to limit the foregoing, licensor
|
52 |
+
makes no representations or warranties (i) regarding the merchantability or fitness for a particular purpose of
|
53 |
+
the Model & Software, (ii) that the use of the Model & Software will not infringe any patents, copyrights or other
|
54 |
+
intellectual property rights of a third party, and (iii) that the use of the Model & Software will not cause any
|
55 |
+
damage of any kind to you or a third party.
|
56 |
+
|
57 |
+
Limitation of Liability
|
58 |
+
Because this Model & Software License Agreement qualifies as a donation, according to Section 521 of the German
|
59 |
+
Civil Code (Bürgerliches Gesetzbuch – BGB) Licensor as a donor is liable for intent and gross negligence only.
|
60 |
+
If the Licensor fraudulently conceals a legal or material defect, they are obliged to compensate the Licensee
|
61 |
+
for the resulting damage.
|
62 |
+
|
63 |
+
Licensor shall be liable for loss of data only up to the amount of typical recovery costs which would have
|
64 |
+
arisen had proper and regular data backup measures been taken. For the avoidance of doubt Licensor shall be
|
65 |
+
liable in accordance with the German Product Liability Act in the event of product liability. The foregoing
|
66 |
+
applies also to Licensor’s legal representatives or assistants in performance. Any further liability shall
|
67 |
+
be excluded. Patent claims generated through the usage of the Model & Software cannot be directed towards the copyright holders.
|
68 |
+
The Model & Software is provided in the state of development the licensor defines. If modified or extended by
|
69 |
+
Licensee, the Licensor makes no claims about the fitness of the Model & Software and is not responsible
|
70 |
+
for any problems such modifications cause.
|
71 |
+
|
72 |
+
No Maintenance Services
|
73 |
+
You understand and agree that Licensor is under no obligation to provide either maintenance services,
|
74 |
+
update services, notices of latent defects, or corrections of defects with regard to the Model & Software.
|
75 |
+
Licensor nevertheless reserves the right to update, modify, or discontinue the Model & Software at any time.
|
76 |
+
|
77 |
+
Defects of the Model & Software must be notified in writing to the Licensor with a comprehensible description
|
78 |
+
of the error symptoms. The notification of the defect should enable the reproduction of the error.
|
79 |
+
The Licensee is encouraged to communicate any use, results, modification or publication.
|
80 |
+
|
81 |
+
Publications using the Model & Software
|
82 |
+
You acknowledge that the Model & Software is a valuable scientific resource and agree to appropriately reference
|
83 |
+
the following paper in any publication making use of the Model & Software.
|
84 |
+
|
85 |
+
Commercial licensing opportunities
|
86 |
+
For commercial uses of the Model & Software, please send email to [email protected]
|
87 |
+
|
88 |
+
This Agreement shall be governed by the laws of the Federal Republic of Germany except for the UN Sales Convention.
|
SHOW/README.md
ADDED
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
1 |
+
# SHOW: Synchronous HOlistic body in the Wild
|
2 |
+
<b>[CVPR2023] Generating Holistic 3D Human Motion from Speech</b>
|
3 |
+
|
4 |
+
[[Project Page](https://talkshow.is.tue.mpg.de)] [[Arxiv](https://export.arxiv.org/abs/2212.04420)] [[Colab](https://colab.research.google.com/drive/1ZGuRX-m_2xEZ2JGGpvyePTLDBPKFd41I?usp=sharing)]
|
5 |
+
|
6 |
+
<p align="center">
|
7 |
+
<img src="doc/images/overview.png">
|
8 |
+
</p>
|
9 |
+
|
10 |
+
This repository provides the official implementation of SHOW (Synchronous HOlistic body in the Wild). Given only RGB images or videos, SHOW reconstructs holistic whole-body mesh results. Please refer to the [arXiv paper](https://export.arxiv.org/abs/2212.04420) for more details.
|
11 |
+
|
12 |
+
|
13 |
+
**What you can use**:
|
14 |
+
|
15 |
+
* **easy and efficient:** adapts SMPLify-X to the videos of
|
16 |
+
talking persons with several good practices.
|
17 |
+
* **state-of-the-art:** Compared to other methods, ours produces more accurate and stable results with richer details.
|
18 |
+
|
19 |
+
<p align="center">
|
20 |
+
<img src="doc/show.gif">
|
21 |
+
</p>
|
22 |
+
|
23 |
+
## Getting Started
|
24 |
+
|
25 |
+
Take a quick tour on Colab: [[Colab]](https://colab.research.google.com/drive/1ZGuRX-m_2xEZ2JGGpvyePTLDBPKFd41I?usp=sharing).
|
26 |
+
|
27 |
+
<!-- Alternatively, you can directly run the [ipynb file](SHOW_demo.ipynb) in the Jupyter environment. -->
|
28 |
+
|
29 |
+
### Installation
|
30 |
+
|
31 |
+
To install SHOW, please execute `pip install git+https://github.com/yhw-yhw/SHOW.git` or
|
32 |
+
|
33 |
+
```bash
|
34 |
+
git clone https://github.com/yhw-yhw/SHOW.git
|
35 |
+
cd SHOW && pip install -v -e .
|
36 |
+
```
|
37 |
+
|
38 |
+
### Preliminary
|
39 |
+
|
40 |
+
- [environment] Use a virtual environment by running
|
41 |
+
|
42 |
+
```bash
|
43 |
+
conda create -n env_SHOW python=3.9
|
44 |
+
eval "$(conda shell.bash hook)"
|
45 |
+
conda activate env_SHOW
|
46 |
+
```
|
47 |
+
|
48 |
+
Install PyTorch using `pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116` or
|
49 |
+
|
50 |
+
```bash
|
51 |
+
conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
|
52 |
+
```
|
53 |
+
|
54 |
+
Then install the requirements of each module:
|
55 |
+
|
56 |
+
```bash
|
57 |
+
cd SHOW
|
58 |
+
cd modules/MICA && pip install -r requirements.txt
|
59 |
+
cd ../PIXIE && pip install -r requirements.txt
|
60 |
+
cd ../PyMAF && pip install -r requirements.txt
|
61 |
+
cd ../DECA && pip install -r requirements.txt
|
62 |
+
cd ../.. && pip install -r requirements.txt
|
63 |
+
```
|
64 |
+
|
65 |
+
Note that PyTorch3D may require manual installation (see the instructions [here](https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md)).
|
66 |
+
|
67 |
+
- [models] Download the packed model files and put them in the directory at the same level as SHOW:
|
68 |
+
|
69 |
+
```bash
|
70 |
+
wget https://www.dropbox.com/s/gqdcu51ilo44k3i/models.zip?dl=0 -O models.zip
|
71 |
+
wget https://www.dropbox.com/s/r14bl9mhvngohla/data.zip?dl=0 -O data.zip
|
72 |
+
unzip data.zip
|
73 |
+
7za x models.zip
|
74 |
+
```
|
75 |
+
|
76 |
+
- [OpenPose]: follow the code from the [OpenPose Colab notebook](https://colab.research.google.com/github/tugstugi/dl-colab-notebooks/blob/master/notebooks/OpenPose.ipynb), and change the OpenPose bin paths `openpose_root_path` and `openpose_bin_path` in `configs\configs\machine_cfg.py`.
|
77 |
+
|
78 |
+
- [MMPose]: Make sure to install mmcv-full, and set the env variable `mmpose_root`:
|
79 |
+
|
80 |
+
```bash
|
81 |
+
pip install openmim
|
82 |
+
mim install mmcv-full
|
83 |
+
git clone https://github.com/open-mmlab/mmdetection
|
84 |
+
cd /content/mmdetection && python setup.py install
|
85 |
+
git clone https://github.com/open-mmlab/mmpose
|
86 |
+
export mmpose_root=/path/to/mmpose  # placeholder: set this to your local mmpose checkout
|
87 |
+
```
|
88 |
+
|
89 |
+
- models for `insightface`:
|
90 |
+
1) [antelopev2](https://keeper.mpdl.mpg.de/f/2d58b7fed5a74cb5be83/?dl=1)
|
91 |
+
2) [buffalo_l](https://keeper.mpdl.mpg.de/f/8faabd353cfc457fa5c5/?dl=1)
|
92 |
+
|
93 |
+
Use the following commands as a reference:
|
94 |
+
|
95 |
+
```bash
|
96 |
+
mkdir -p ~/.insightface/models
|
97 |
+
cd ~/.insightface/models
|
98 |
+
wget https://keeper.mpdl.mpg.de/f/2d58b7fed5a74cb5be83/?dl=1 -O antelopev2.zip
|
99 |
+
wget https://keeper.mpdl.mpg.de/f/8faabd353cfc457fa5c5/?dl=1 -O buffalo_l.zip
|
100 |
+
mkdir -p antelopev2 && cd antelopev2 && unzip -o ../antelopev2.zip
|
101 |
+
mkdir -p buffalo_l && cd buffalo_l && unzip -o ../buffalo_l.zip
|
102 |
+
```
|
103 |
+
|
104 |
+
|
105 |
+
- [others] `pip uninstall -y xtcocotools && pip install xtcocotools --no-binary xtcocotools`
|
106 |
+
|
107 |
+
### Run
|
108 |
+
|
109 |
+
Run SHOW on the demo video:
|
110 |
+
|
111 |
+
```bash
|
112 |
+
python main.py --speaker_name -1 --all_top_dir ./test/demo_video/half.mp4
|
113 |
+
```
|
114 |
+
|
115 |
+
It takes 15-20 minutes for a 5-second, 30 FPS video on a Colab Tesla T4.
|
116 |
+
|
117 |
+
The final results are stored in `./test/demo_video/ours_exp`. All the SMPL-X variables can be found in `./test/demo_video/ours_exp/final_all.pkl`, and the visualization can be viewed in `./test/demo_video/ours_exp/final_vis.mp4`.
|
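A minimal inspection sketch for this output (assumptions: `final_all.pkl` is a standard pickle holding a dict of arrays; the field names follow the dataset description later in this README):

```python
# Sketch only: assumes final_all.pkl is a plain pickle of a dict of numpy arrays.
import pickle

with open('./test/demo_video/ours_exp/final_all.pkl', 'rb') as f:
    results = pickle.load(f)

for key, value in results.items():
    shape = getattr(value, 'shape', None)
    print(key, shape if shape is not None else type(value))
```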
118 |
+
|
119 |
+
## Datasets
|
120 |
+
|
121 |
+
### Download Dataset Videos
|
122 |
+
|
123 |
+
To download all videos from YouTube, please refer to (https://github.com/amirbar/speech2gesture) or use the script `download_youtube.py`; remember to install `yt-dlp` first. After downloading all videos, you can use `SHOW_intervals_subject4.csv` for video interval cropping.
|
124 |
+
|
125 |
+
### Visualize Dataset
|
126 |
+
|
127 |
+
After running SHOW, the processed data is stored in a pkl file. Here we visualize a pkl file from our provided dataset.
|
128 |
+
|
129 |
+
```bash
|
130 |
+
python render_pkl_release.py \
|
131 |
+
--pkl_file_path test/demo_pkl/all.pkl \
|
132 |
+
--out_images_path test/demo_pkl/ours_images \
|
133 |
+
--output_video_path test/demo_pkl/ours.mp4 \
|
134 |
+
--smplx_model_path ../models/smplx/SMPLX_NEUTRAL_2020_org.npz
|
135 |
+
```
|
136 |
+
|
137 |
+
### Download Links
|
138 |
+
|
139 |
+
The data reconstructed by SHOW is released; you can download it here:
|
140 |
+
|
141 |
+
- [[Dropbox]](https://www.dropbox.com/sh/f1gu531w5s2sbqd/AAA2I7oLolEkcXnWI6tnwUpAa?dl=0)
|
142 |
+
|
143 |
+
<!-- Larger datasets will be released later -->
|
144 |
+
|
145 |
+
### Dataset Description
|
146 |
+
|
147 |
+
- speaker=oliver/chemistry/conan/seth
|
148 |
+
- The maximum length of a video clip is 10 s at 30 fps
|
149 |
+
- Format of files in the compressed package:
|
150 |
+
- `{speaker}_wav_tar.tar.gz`:
|
151 |
+
- The path format of each file is: `speaker/video_fn/seq_fn.wav`
|
152 |
+
- Audio extracted from the original video at a 22 kHz sampling rate
|
153 |
+
- `{speaker}_pkl_tar.tar.gz`:
|
154 |
+
- The path format of each file is: `speaker/video_fn/seq_fn.pkl`
|
155 |
+
- Data contained in the pkl file:
|
156 |
+
|
157 |
+
```bash
|
158 |
+
width,height: the video width and height
|
159 |
+
center: the center point of the video
|
160 |
+
batch_size: the sequence length
|
161 |
+
camera_transl: the displacement of the camera
|
162 |
+
focal_length: the pixel focal length of a camera
|
163 |
+
body_pose_axis: (bs, 21, 3)
|
164 |
+
expression: (bs, 100)
|
165 |
+
jaw_pose: (bs,3)
|
166 |
+
betas: (300)
|
167 |
+
global_orient: (bs,3)
|
168 |
+
transl: (bs,3)
|
169 |
+
left_hand_pose: (bs,12)
|
170 |
+
right_hand_pose: (bs,12)
|
171 |
+
leye_pose: (bs,3)
|
172 |
+
reye_pose: (bs,3)
|
173 |
+
pose_embedding: (bs,32)
|
174 |
+
```
|
175 |
+
|
176 |
+
- Set the config of smplx model as follows:
|
177 |
+
|
178 |
+
```python
|
179 |
+
smplx_cfg=dict(
|
180 |
+
model_path='path_to_smplx_model',
|
181 |
+
model_type= 'smplx',
|
182 |
+
gender= 'neutral',
|
183 |
+
use_face_contour= True,
|
184 |
+
use_pca= True,
|
185 |
+
flat_hand_mean= False,
|
186 |
+
use_hands= True,
|
187 |
+
use_face= True,
|
188 |
+
num_pca_comps= 12,
|
189 |
+
num_betas= 300,
|
190 |
+
num_expression_coeffs= 100,
|
191 |
+
)
|
192 |
+
```
|
193 |
+
|
194 |
+
- In practice, the global orient and transl parameters should be fixed to those of the first frame, and the lower-body pose should be fixed to a sitting or standing position: [code](post_process.py). A minimal loading sketch follows this list.
|
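A minimal loading sketch that ties the pkl fields and the `smplx_cfg` above together (assumptions: the pkl is a standard pickle of a dict with the listed keys, the `smplx` package is installed, and both file paths are placeholders):

```python
# Sketch under the stated assumptions; not a verbatim part of the SHOW pipeline.
import pickle
import numpy as np
import torch
import smplx

with open('speaker/video_fn/seq_fn.pkl', 'rb') as f:   # placeholder path
    seq = pickle.load(f)
bs = int(seq['batch_size'])

model = smplx.create(
    model_path='path_to_smplx_model',   # placeholder, as in smplx_cfg above
    model_type='smplx', gender='neutral',
    use_face_contour=True, use_pca=True, flat_hand_mean=False,
    num_pca_comps=12, num_betas=300, num_expression_coeffs=100,
    batch_size=bs,
)

tt = lambda x: torch.as_tensor(np.asarray(x), dtype=torch.float32)
output = model(
    betas=tt(seq['betas']).expand(bs, -1),
    body_pose=tt(seq['body_pose_axis']).reshape(bs, -1),
    jaw_pose=tt(seq['jaw_pose']),
    expression=tt(seq['expression']),
    global_orient=tt(seq['global_orient']),
    transl=tt(seq['transl']),
    left_hand_pose=tt(seq['left_hand_pose']),
    right_hand_pose=tt(seq['right_hand_pose']),
    leye_pose=tt(seq['leye_pose']),
    reye_pose=tt(seq['reye_pose']),
)
print(output.vertices.shape)   # (batch_size, num_vertices, 3)
```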
195 |
+
|
196 |
+
### SMPLX expression dim convert tool
|
197 |
+
|
198 |
+
[code](cvt_exp_dim_tool.py)
|
199 |
+
|
200 |
+
Usage:
|
201 |
+
```
|
202 |
+
python cvt_exp_dim_tool.py \
|
203 |
+
--target-exp-dim 50 \
|
204 |
+
--pkl-path ./rich.npy \
|
205 |
+
--model-path ../models/smplx/SMPLX_MALE_shape2019_exp2020.npz
|
206 |
+
```
|
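As a rough illustration of what changing the expression dimension means, the sketch below simply truncates or zero-pads the expression codes (an assumption made for illustration only; the actual tool may instead re-fit the codes against the SMPL-X model):

```python
# Illustration only; cvt_exp_dim_tool.py may use a different (re-fitting) strategy.
import numpy as np

def naive_convert_exp_dim(expression: np.ndarray, target_dim: int) -> np.ndarray:
    # expression: (bs, src_dim) SMPL-X/FLAME expression codes.
    bs, src_dim = expression.shape
    if target_dim <= src_dim:
        return expression[:, :target_dim].copy()
    out = np.zeros((bs, target_dim), dtype=expression.dtype)
    out[:, :src_dim] = expression
    return out

print(naive_convert_exp_dim(np.zeros((8, 100), dtype=np.float32), 50).shape)  # (8, 50)
```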
207 |
+
|
208 |
+
## Citation
|
209 |
+
|
210 |
+
If you use this project in your research, please cite this paper:
|
211 |
+
```
|
212 |
+
@inproceedings{yi2022generating,
|
213 |
+
title={Generating Holistic 3D Human Motion from Speech},
|
214 |
+
author={Yi, Hongwei and Liang, Hualin and Liu, Yifei and Cao, Qiong and Wen, Yandong and Bolkart, Timo and Tao, Dacheng and Black, Michael J},
|
215 |
+
booktitle={CVPR},
|
216 |
+
year={2023}
|
217 |
+
}
|
218 |
+
```
|
219 |
+
|
220 |
+
## Issues
|
221 |
+
|
222 |
+
- If the following error is encountered:
|
223 |
+
```
|
224 |
+
RuntimeError: Subtraction, the `-` operator, with a bool tensor is not supported. If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.
|
225 |
+
```
|
226 |
+
|
227 |
+
Open the `torchgeometry/core/conversions.py` file and replace lines 304-306 with:
|
228 |
+
|
229 |
+
```
|
230 |
+
mask_c1 = mask_d2 * (~ mask_d0_d1)
|
231 |
+
mask_c2 = (~ mask_d2) * mask_d0_nd1
|
232 |
+
mask_c3 = (~ mask_d2) * (~ mask_d0_nd1)
|
233 |
+
```
|
234 |
+
|
235 |
+
## License
|
236 |
+
|
237 |
+
This code and model are available for non-commercial scientific research purposes as defined in the [LICENSE](LICENSE) file.
|
238 |
+
By downloading and using the code and model you agree to the terms in the [LICENSE](LICENSE).
|
239 |
+
|
240 |
+
## Acknowledgements
|
241 |
+
|
242 |
+
For functions or scripts that are based on external sources, we acknowledge the origin individually in each file. We specifically thank [WOJCIECH ZIELONKA](https://zielon.github.io/) and [Justus Thies](https://justusthies.github.io/) for sharing their face tracking codebase.
|
243 |
+
|
244 |
+
Here are some great resources we benefit from:
|
245 |
+
|
246 |
+
- [SMPLify-X](https://smpl-x.is.tue.mpg.de/)
|
247 |
+
- [DECA](https://github.com/YadiraF/DECA) for face expression initialization
|
248 |
+
- [PIXIE](https://pixie.is.tue.mpg.de/) and [PyMAF-X](https://github.com/HongwenZhang/PyMAF-X) for SMPL-X parameters initialization
|
249 |
+
- [DeepLab](https://github.com/leimao/DeepLab-V3) for person segmentation
|
250 |
+
- [MICA](https://github.com/Zielon/MICA) and [photometric_optimization](https://github.com/HavenFeng/photometric_optimization) for face tracking
|
251 |
+
- [MICA_Tracker](https://github.com/Zielon/metrical-tracker)
|
252 |
+
- [Pytorch3D](https://pytorch3d.org/) for rendering
|
253 |
+
- [FAN](https://github.com/1adrianb/2D-and-3D-face-alignment) for landmark detection
|
254 |
+
- [arcface-pytorch](https://github.com/ronghuaiyang/arcface-pytorch)
|
255 |
+
|
256 |
+
## Contact
|
257 |
+
|
258 |
+
For questions, please contact [email protected] or [[email protected]](mailto:[email protected]) or [[email protected]](mailto:[email protected]).
|
259 |
+
|
260 |
+
For commercial licensing, please contact [email protected]
|
SHOW/SHOW/__init__.py
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import matplotlib
|
3 |
+
import platform
|
4 |
+
|
5 |
+
if platform.system() == 'Windows':
|
6 |
+
matplotlib.use('TkAgg')
|
7 |
+
|
8 |
+
if platform.system() == "Linux":
|
9 |
+
os.environ['PYOPENGL_PLATFORM'] = 'egl'
|
10 |
+
|
11 |
+
from .utils import *
|
12 |
+
from .face_iders import *
|
13 |
+
from .loggers import *
|
14 |
+
from .image import *
|
15 |
+
from .detector import *
|
16 |
+
|
17 |
+
from mmcv.runner import OPTIMIZERS
|
18 |
+
from mmcv.runner.builder import RUNNERS
|
19 |
+
|
20 |
+
|
21 |
+
def build_optim(cfg):
|
22 |
+
return OPTIMIZERS.build(cfg)
|
23 |
+
|
24 |
+
def build_runner(cfg):
|
25 |
+
return RUNNERS.build(cfg)
|
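A small usage sketch for the two registry helpers above (assumptions: the `SHOW` package and `mmcv` are importable, and mmcv has registered the standard torch optimizers in its `OPTIMIZERS` registry, as recent 1.x versions do):

```python
# Usage sketch; the remaining config keys are passed through to torch.optim.Adam.
import torch
import SHOW

model = torch.nn.Linear(10, 2)
optimizer = SHOW.build_optim(dict(type='Adam', params=model.parameters(), lr=1e-3))
print(type(optimizer).__name__)   # Adam
```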
SHOW/SHOW/constants.py
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
1 |
+
sitting_pose = [
|
2 |
+
0.0, 0.0, 0.0, -1.1826512813568115, 0.23866955935955048, 0.15146760642528534, -1.2604516744613647,
|
3 |
+
-0.3160211145877838, -0.1603458970785141, 0.0, 0.0, 0.0, 1.1654603481292725, 0.0, 0.0,
|
4 |
+
1.2521806955337524, 0.041598282754421234, -0.06312154978513718, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
|
5 |
+
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
|
6 |
+
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
|
7 |
+
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
|
8 |
+
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
|
9 |
+
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
|
10 |
+
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
|
11 |
+
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
|
12 |
+
0.0, 0.0, 0.0]
|
SHOW/SHOW/datasets/__init__.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
1 |
+
from .op_base import *
|
2 |
+
from .op_dataset import *
|
3 |
+
from .op_post_process import *
|
4 |
+
from .pre_dataset import *
|
SHOW/SHOW/datasets/model_func_atach.py
ADDED
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
import smplx
|
3 |
+
from smplx.body_models import SMPLXLayer
|
4 |
+
import numpy as np
|
5 |
+
|
6 |
+
|
7 |
+
# (PIXIE_init_hand-hand_mean)@inv_hand_comp=hand_pca_delta
|
8 |
+
# hand_pca_delta@hand_comp+hand_mean=PIXIE_init_hand
|
9 |
+
# hand_pca_full@hand_comp=PIXIE_init_hand
|
10 |
+
|
11 |
+
|
12 |
+
def hand_pca_to_axis(self, lhand_pca, rhand_pca):
|
13 |
+
# device=self.left_hand_mean.device
|
14 |
+
lhand_axis = torch.einsum('bi,ij->bj', [lhand_pca, self.left_hand_components])
|
15 |
+
rhand_axis = torch.einsum('bi,ij->bj', [rhand_pca, self.right_hand_components])
|
16 |
+
|
17 |
+
if not self.flat_hand_mean:
|
18 |
+
lhand_axis=lhand_axis+self.left_hand_mean
|
19 |
+
rhand_axis=rhand_axis+self.right_hand_mean
|
20 |
+
|
21 |
+
return lhand_axis,rhand_axis
|
22 |
+
|
23 |
+
|
24 |
+
def hand_axis_to_pca(self, lhand_axis, rhand_axis):
|
25 |
+
device=self.left_hand_mean.device
|
26 |
+
|
27 |
+
if isinstance(lhand_axis, np.ndarray):
|
28 |
+
lhand_axis = torch.from_numpy(lhand_axis)
|
29 |
+
if isinstance(rhand_axis, np.ndarray):
|
30 |
+
rhand_axis = torch.from_numpy(rhand_axis)
|
31 |
+
|
32 |
+
lhand_axis = lhand_axis.reshape(-1, 45).to(device)
|
33 |
+
rhand_axis = rhand_axis.reshape(-1, 45).to(device)
|
34 |
+
|
35 |
+
if not self.flat_hand_mean:
|
36 |
+
lhand_axis=lhand_axis-self.left_hand_mean
|
37 |
+
rhand_axis=rhand_axis-self.right_hand_mean
|
38 |
+
|
39 |
+
lhand_pca = torch.einsum('bi,ij->bj', [lhand_axis, self.l_comp])
|
40 |
+
rhand_pca = torch.einsum('bi,ij->bj', [rhand_axis, self.r_comp])
|
41 |
+
|
42 |
+
# return lhand_pca, rhand_pca
|
43 |
+
return lhand_pca.to('cpu'), rhand_pca.to('cpu')
|
44 |
+
|
45 |
+
|
46 |
+
def atach_model_func(model):
|
47 |
+
if not hasattr(model, 'hand_axis_to_pca'):
|
48 |
+
setattr(model, 'hand_axis_to_pca',hand_axis_to_pca)
|
49 |
+
|
50 |
+
if not hasattr(model, 'hand_pca_to_axis'):
|
51 |
+
setattr(model, 'hand_pca_to_axis',hand_pca_to_axis)
|
52 |
+
|
53 |
+
if not hasattr(model, 'l_comp'):
|
54 |
+
l_comp = torch.linalg.pinv(model.left_hand_components)
|
55 |
+
r_comp = torch.linalg.pinv(model.right_hand_components)
|
56 |
+
setattr(model, 'l_comp', l_comp)
|
57 |
+
setattr(model, 'r_comp', r_comp)
|
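A small sketch of how the helpers above relate axis-angle and PCA hand poses (assumptions: an `smplx` SMPL-X model built with `use_pca=True`, so that `left_hand_components`/`right_hand_components` exist; the model path is a placeholder):

```python
# Sketch; note the helpers are attached as plain functions on the instance,
# so the model has to be passed explicitly as the first argument.
import torch
import smplx
from SHOW.datasets.model_func_atach import atach_model_func

model = smplx.create('path_to_smplx_model', model_type='smplx',
                     use_pca=True, num_pca_comps=12)   # placeholder path
atach_model_func(model)

lhand_axis = torch.zeros(1, 45)   # e.g. a PIXIE hand pose, 15 joints x 3 axis-angle values
rhand_axis = torch.zeros(1, 45)
lhand_pca, rhand_pca = model.hand_axis_to_pca(model, lhand_axis, rhand_axis)
print(lhand_pca.shape)            # (1, 12) PCA coefficients
```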
SHOW/SHOW/datasets/op_base.py
ADDED
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
1 |
+
|
2 |
+
from scipy.io import savemat, loadmat
|
3 |
+
from collections import namedtuple
|
4 |
+
from collections import defaultdict
|
5 |
+
import numpy as np
|
6 |
+
import json
|
7 |
+
import torch
|
8 |
+
import glob
|
9 |
+
import cv2
|
10 |
+
import os.path as osp
|
11 |
+
import os
|
12 |
+
from pathlib import Path
|
13 |
+
from typing import Union
|
14 |
+
from functools import reduce, partial
|
15 |
+
from easydict import EasyDict
|
16 |
+
from loguru import logger
|
17 |
+
|
18 |
+
return_item_tuple = namedtuple(
|
19 |
+
'return_item_tuple',
|
20 |
+
['keypoints_2d', 'gender_gt']
|
21 |
+
)
|
22 |
+
return_item_tuple.__new__.__defaults__ = (None,)*len(return_item_tuple._fields)
|
23 |
+
|
24 |
+
|
25 |
+
class op_base(object):
|
26 |
+
|
27 |
+
def get_joint_weights(self) -> torch.Tensor:
|
28 |
+
# @return optim_weights: [1,135,1]
|
29 |
+
self.optim_weights = torch.ones(
|
30 |
+
self.num_joints +
|
31 |
+
2 * self.use_hands +
|
32 |
+
51 * self.use_face +
|
33 |
+
17 * self.use_face_contour,
|
34 |
+
dtype=self.dtype
|
35 |
+
).to(self.device)
|
36 |
+
|
37 |
+
self.optim_weights = self.optim_weights[None, ..., None]
|
38 |
+
|
39 |
+
return self.optim_weights
|
40 |
+
|
41 |
+
def get_smplx_to_o3d_R(self,) -> np.ndarray:
|
42 |
+
R = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
|
43 |
+
return R
|
44 |
+
|
45 |
+
def get_smplx_to_pyrender_R(self,) -> np.ndarray:
|
46 |
+
R = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
|
47 |
+
return R
|
48 |
+
|
49 |
+
def get_smplx_to_pyrender_T(self,) -> np.ndarray:
|
50 |
+
cam_transl = self.pp.cam_transl
|
51 |
+
if isinstance(cam_transl, torch.Tensor):
|
52 |
+
cam_transl = cam_transl.cpu().detach().numpy()
|
53 |
+
cam_t = cam_transl[0].copy()
|
54 |
+
cam_t[0] *= -1
|
55 |
+
return cam_t
|
56 |
+
|
57 |
+
def get_smplx_to_o3d_T(self,) -> np.ndarray:
|
58 |
+
# assert(self.pp.cam_transl is not None)
|
59 |
+
return self.cvt_pixie_cam_to_o3d(self.pp.cam_transl)
|
60 |
+
|
61 |
+
def cvt_pixie_cam_to_o3d(self, cam_transl) -> np.ndarray:
|
62 |
+
if isinstance(cam_transl, torch.Tensor):
|
63 |
+
cam_transl = cam_transl.cpu().detach().numpy()
|
64 |
+
cam_t = cam_transl[0].copy()
|
65 |
+
cam_t[0] *= -1
|
66 |
+
cam_t[1] *= -1
|
67 |
+
return cam_t
|
68 |
+
|
69 |
+
def get_smplx_to_pyrender_K(self, cam_transl) -> np.ndarray:
|
70 |
+
if isinstance(cam_transl, torch.Tensor):
|
71 |
+
T = cam_transl.detach().cpu().numpy()
|
72 |
+
T[1] *= -1
|
73 |
+
K = np.eye(4)
|
74 |
+
K[:3, 3] = T
|
75 |
+
return K
|
76 |
+
|
77 |
+
def get_smplx_to_pyrender_K2(self,) -> np.ndarray:
|
78 |
+
from smplx.lbs import transform_mat
|
79 |
+
R = self.get_smplx_to_pyrender_R()
|
80 |
+
T = self.get_smplx_to_pyrender_T()
|
81 |
+
K = transform_mat(torch.from_numpy(R).unsqueeze(0),
|
82 |
+
torch.from_numpy(T).unsqueeze(0).unsqueeze(dim=-1)
|
83 |
+
).numpy()[0]
|
84 |
+
return K
|
85 |
+
|
86 |
+
@classmethod
|
87 |
+
def smpl_to_openpose(
|
88 |
+
cls,
|
89 |
+
use_face=True,
|
90 |
+
use_hands=True,
|
91 |
+
use_face_contour=True,
|
92 |
+
) -> np.ndarray:
|
93 |
+
# smplx-->openpose_body_25
|
94 |
+
body_mapping = np.array([55, 12, 17, 19, 21, 16, 18, 20, 0, 2, 5,
|
95 |
+
8, 1, 4, 7, 56, 57, 58, 59, 60, 61, 62,
|
96 |
+
63, 64, 65], dtype=np.int32) # 25
|
97 |
+
mapping = [body_mapping]
|
98 |
+
|
99 |
+
if use_hands:
|
100 |
+
lhand_mapping = np.array([20, 37, 38, 39, 66, 25, 26, 27,
|
101 |
+
67, 28, 29, 30, 68, 34, 35, 36, 69,
|
102 |
+
31, 32, 33, 70], dtype=np.int32)
|
103 |
+
rhand_mapping = np.array([21, 52, 53, 54, 71, 40, 41, 42, 72,
|
104 |
+
43, 44, 45, 73, 49, 50, 51, 74, 46,
|
105 |
+
47, 48, 75], dtype=np.int32)
|
106 |
+
mapping += [lhand_mapping, rhand_mapping]
|
107 |
+
|
108 |
+
if use_face:
|
109 |
+
# end_idx = 127 + 17 * use_face_contour
|
110 |
+
face_mapping = np.arange(76, 127 + 17 * use_face_contour,
|
111 |
+
dtype=np.int32)
|
112 |
+
mapping += [face_mapping]
|
113 |
+
|
114 |
+
return np.concatenate(mapping)
|
115 |
+
|
116 |
+
@classmethod
|
117 |
+
def read_keypoints(
|
118 |
+
cls,
|
119 |
+
keypoint_fn=None,
|
120 |
+
use_hands=True,
|
121 |
+
use_face=True,
|
122 |
+
use_face_contour=True
|
123 |
+
):
|
124 |
+
|
125 |
+
with open(keypoint_fn) as f:
|
126 |
+
data = json.load(f)
|
127 |
+
|
128 |
+
# body_size_list = []
|
129 |
+
keypoints = []
|
130 |
+
gender_gt = []
|
131 |
+
|
132 |
+
for _, person_data in enumerate(data['people']):
|
133 |
+
body_keypoints = np.array(
|
134 |
+
person_data['pose_keypoints_2d'], dtype=np.float32)
|
135 |
+
body_keypoints = body_keypoints.reshape([-1, 3])
|
136 |
+
|
137 |
+
if use_hands:
|
138 |
+
left_hand_keyp = np.array(
|
139 |
+
person_data['hand_left_keypoints_2d'],
|
140 |
+
dtype=np.float32).reshape([-1, 3])
|
141 |
+
right_hand_keyp = np.array(
|
142 |
+
person_data['hand_right_keypoints_2d'],
|
143 |
+
dtype=np.float32).reshape([-1, 3])
|
144 |
+
body_keypoints = np.concatenate(
|
145 |
+
[body_keypoints, left_hand_keyp, right_hand_keyp], axis=0)
|
146 |
+
|
147 |
+
if use_face:
|
148 |
+
face_keypoints = np.array(
|
149 |
+
person_data['face_keypoints_2d'],
|
150 |
+
dtype=np.float32).reshape([-1, 3])[17: 17 + 51, :]
|
151 |
+
contour_keyps = np.array(
|
152 |
+
[], dtype=body_keypoints.dtype).reshape(0, 3)
|
153 |
+
if use_face_contour:
|
154 |
+
contour_keyps = np.array(
|
155 |
+
person_data['face_keypoints_2d'],
|
156 |
+
dtype=np.float32).reshape([-1, 3])[:17, :]
|
157 |
+
body_keypoints = np.concatenate(
|
158 |
+
[body_keypoints, face_keypoints, contour_keyps], axis=0)
|
159 |
+
keypoints.append(body_keypoints)
|
160 |
+
|
161 |
+
if 'gender_gt' in person_data:
|
162 |
+
gender_gt.append(person_data['gender_gt'])
|
163 |
+
|
164 |
+
# keypoints: [B,135,3]
|
165 |
+
return return_item_tuple(
|
166 |
+
keypoints,
|
167 |
+
gender_gt
|
168 |
+
)
|
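A small sketch of what the `smpl_to_openpose` mapping is for: selecting the SMPL-X joints in OpenPose order so they can be compared against the 2D keypoints parsed by `read_keypoints` (the joint count and file name below are placeholders/assumptions):

```python
# Sketch; assumes SMPL-X joints with face contour, i.e. at least 144 joints per frame.
import numpy as np
from SHOW.datasets.op_base import op_base

mapping = op_base.smpl_to_openpose(use_face=True, use_hands=True, use_face_contour=True)
print(mapping.shape)   # (135,) = 25 body + 21 left hand + 21 right hand + 51 face + 17 contour

model_joints = np.zeros((1, 144, 3), dtype=np.float32)   # placeholder model joints (B, J, 3)
openpose_ordered = model_joints[:, mapping]               # (1, 135, 3), OpenPose keypoint order

# read_keypoints returns a namedtuple; keypoints_2d is a list of (135, 3) arrays, one per person.
kps = op_base.read_keypoints('frame_000001_keypoints.json').keypoints_2d   # placeholder path
```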
SHOW/SHOW/datasets/op_dataset.py
ADDED
@@ -0,0 +1,732 @@
|
|
|
|
|
|
|
1 |
+
|
2 |
+
from collections import defaultdict
|
3 |
+
import numpy as np
|
4 |
+
import json
|
5 |
+
import torch
|
6 |
+
import glob
|
7 |
+
import cv2
|
8 |
+
import os.path as osp
|
9 |
+
import os
|
10 |
+
import sys
|
11 |
+
from pathlib import Path
|
12 |
+
from .op_post_process import op_post_process
|
13 |
+
from .pre_dataset import *
|
14 |
+
from ..image import *
|
15 |
+
from typing import Union
|
16 |
+
from functools import reduce, partial
|
17 |
+
from easydict import EasyDict
|
18 |
+
from loguru import logger
|
19 |
+
from .op_base import op_base
|
20 |
+
from ..utils import glob_exts_in_path
|
21 |
+
from ..utils import default_timers
|
22 |
+
from ..utils import is_empty_dir,ext_files_num_in_dir,img_files_num_in_dir,run_openpose
|
23 |
+
from ..face_iders import match_faces
|
24 |
+
from .pre_runner import run_pymafx
|
25 |
+
from tqdm import tqdm
|
26 |
+
|
27 |
+
|
28 |
+
from modules.PIXIE.demos.api_multi_pixie import api_multi_body
|
29 |
+
from modules.DECA.demos.api_multi_deca import api_multi_deca
|
30 |
+
from SHOW.detector.face_detector import FaceDetector
|
31 |
+
from SHOW.video_filter.deeplab_seg import deeplab_seg
|
32 |
+
from SHOW.utils.video import images_to_video
|
33 |
+
import joblib
|
34 |
+
import SHOW
|
35 |
+
|
36 |
+
|
37 |
+
class op_dataset(op_base):
|
38 |
+
NUM_BODY_JOINTS = 25
|
39 |
+
NUM_HAND_JOINTS = 20
|
40 |
+
|
41 |
+
def __init__(
|
42 |
+
self,
|
43 |
+
|
44 |
+
dtype=torch.float32,
|
45 |
+
device='cpu',
|
46 |
+
batch_size=-1,
|
47 |
+
config=None,
|
48 |
+
face_ider=None,
|
49 |
+
person_face_emb: np.ndarray = None,
|
50 |
+
):
|
51 |
+
self.config = config
|
52 |
+
self.use_hands = config.use_hands
|
53 |
+
self.use_face = config.use_face
|
54 |
+
self.use_face_contour = config.use_face_contour
|
55 |
+
|
56 |
+
self.person_face_emb = person_face_emb
|
57 |
+
|
58 |
+
|
59 |
+
self.batch_size = batch_size
|
60 |
+
self.device = device
|
61 |
+
self.dtype = dtype
|
62 |
+
|
63 |
+
self.all_processed_item = []
|
64 |
+
self.corr_center_pymaf = None
|
65 |
+
self.correspond_center = None
|
66 |
+
self.correspond_bbox = None
|
67 |
+
self.face_ider = face_ider
|
68 |
+
self.cnt = 0
|
69 |
+
|
70 |
+
assert(osp.exists(self.config.img_folder))
|
71 |
+
|
72 |
+
self.get_all_dict=None
|
73 |
+
self.focal = np.array(5000)
|
74 |
+
self.match_thres = 0.08 # 30/720=0.04
|
75 |
+
self.num_joints = (self.NUM_BODY_JOINTS + 2 * self.NUM_HAND_JOINTS * self.use_hands)
|
76 |
+
self.int_id_to_item = defaultdict(dict)
|
77 |
+
# self.all_item_list = self.int_id_to_item = {
|
78 |
+
# 0: {
|
79 |
+
# 'img': 'path/img'
|
80 |
+
# }
|
81 |
+
# 1: {
|
82 |
+
# 'img': 'path/img'
|
83 |
+
# 'fan': 'path/fan'
|
84 |
+
# }
|
85 |
+
# }
|
86 |
+
self.all_processed_item = []
|
87 |
+
# [
|
88 |
+
# {
|
89 |
+
# 'img': np.array,
|
90 |
+
# 'pixie': {
|
91 |
+
# 'face_kpt': np.array
|
92 |
+
# },
|
93 |
+
# }
|
94 |
+
# {
|
95 |
+
# 'img': np.array,
|
96 |
+
# 'pixie': {
|
97 |
+
# 'face_kpt': np.array
|
98 |
+
# },
|
99 |
+
# 'fan': None, # match failed
|
100 |
+
# }
|
101 |
+
# ]
|
102 |
+
self.find_item_template_list={}
|
103 |
+
ref_len = img_files_num_in_dir(self.config.img_folder)
|
104 |
+
|
105 |
+
|
106 |
+
def match_by_bbox(ret_list, get_bbox_center_func):
|
107 |
+
if (ret_list is None or len(ret_list) < 1):
|
108 |
+
logger.error(f'length of ret_list < 1')
|
109 |
+
return None
|
110 |
+
|
111 |
+
if self.correspond_center is not None:
|
112 |
+
dist1 = dist2 = np.inf
|
113 |
+
for ret in ret_list:
|
114 |
+
bbox, cur_center = get_bbox_center_func(ret)
|
115 |
+
dist1 = np.abs(cur_center[0]-self.correspond_center[0])/self.width
|
116 |
+
dist2 = np.abs(cur_center[1]-self.correspond_center[1])/self.height
|
117 |
+
# match by the relative offset in the x and y directions
|
118 |
+
if dist1 < self.match_thres and dist2 < self.match_thres:
|
119 |
+
if bbox is not None:
|
120 |
+
self.correspond_bbox = bbox
|
121 |
+
self.correspond_center = cur_center
|
122 |
+
return ret
|
123 |
+
else:
|
124 |
+
logger.info(f'corresponding center is None')
|
125 |
+
ret = ret_list[0]
|
126 |
+
bbox, cur_center = get_bbox_center_func(ret)
|
127 |
+
self.correspond_bbox = bbox
|
128 |
+
self.correspond_center = cur_center
|
129 |
+
return ret
|
130 |
+
|
131 |
+
# self.correspond_center is not None and match failed
|
132 |
+
if len(ret_list)==1:
|
133 |
+
return ret_list[0]
|
134 |
+
return None
|
135 |
+
|
136 |
+
# images -----------------------------------
|
137 |
+
if True:
|
138 |
+
def empty_func(*args, **kwargs):
|
139 |
+
pass
|
140 |
+
|
141 |
+
find_item_template_img={
|
142 |
+
'name': 'img',
|
143 |
+
'exts': ['png', 'jpg'],
|
144 |
+
'dir': self.config.img_folder,
|
145 |
+
'judge_prepare_data_func': empty_func,
|
146 |
+
'run_prepare_data_func': empty_func,
|
147 |
+
'post_prepare_data_func': empty_func,
|
148 |
+
'match_item_func': empty_func,
|
149 |
+
'read_item_func': empty_func,
|
150 |
+
}
|
151 |
+
self.find_item_template_list['img']=find_item_template_img
|
152 |
+
|
153 |
+
# FAN -----------------------------------
|
154 |
+
if True:
|
155 |
+
def judge_prepare_data_func_fan():
|
156 |
+
return ext_files_num_in_dir(
|
157 |
+
self.config.fan_npy_folder,
|
158 |
+
exts=['*.npy', '*.npy.empty']
|
159 |
+
) < ref_len
|
160 |
+
|
161 |
+
def run_prepare_data_func_fan():
|
162 |
+
fan = SHOW.detector.fan_detector.FAN_Detector()
|
163 |
+
fan.predict(self.config.img_folder,
|
164 |
+
self.config.fan_npy_folder,
|
165 |
+
save_vis=self.config.saveVis,
|
166 |
+
fan_vis_dir=self.config.fan_npy_folder_vis)
|
167 |
+
del fan
|
168 |
+
|
169 |
+
def post_prepare_data_func_fan():
|
170 |
+
if (self.config.saveVis and
|
171 |
+
not Path(self.config.fan_npy_folder_v).exists() and
|
172 |
+
not is_empty_dir(self.config.fan_npy_folder_vis)
|
173 |
+
):
|
174 |
+
logger.info(f'converting {self.config.fan_npy_folder_vis} to {self.config.fan_npy_folder_v}')
|
175 |
+
images_to_video(
|
176 |
+
input_folder=self.config.fan_npy_folder_vis,
|
177 |
+
output_path=self.config.fan_npy_folder_v,
|
178 |
+
img_format = None,
|
179 |
+
fps=30,
|
180 |
+
)
|
181 |
+
|
182 |
+
def match_item_func_fan(fan_ret_list):
|
183 |
+
def get_bbox_center_func(input_ret):
|
184 |
+
face_kpt = input_ret
|
185 |
+
bbox = lmk2d_to_bbox(face_kpt, self.height, self.width)
|
186 |
+
xmin, ymin, xmax, ymax = [int(i) for i in bbox]
|
187 |
+
cur_center = [int((xmin+xmax)/2), int((ymin+ymax)/2)]
|
188 |
+
return bbox, cur_center
|
189 |
+
return match_by_bbox(
|
190 |
+
fan_ret_list,
|
191 |
+
get_bbox_center_func)
|
192 |
+
|
193 |
+
def read_item_func_fan(lmk_path):
|
194 |
+
return np.load(lmk_path, allow_pickle=True)
|
195 |
+
|
196 |
+
def post_read_func_fan(template):
|
197 |
+
# fan_valid_flag = [0 if i is None else 1 for i in template['files_list']]
|
198 |
+
fan_valid_flag = [0 if i.get('fan') is None else 1 for i in self.all_processed_item]
|
199 |
+
self.get_all_dict['fan_valid']=np.array(fan_valid_flag)
|
200 |
+
|
201 |
+
fan_fill = [np.zeros((68,2)) if i.get('fan') is None else i['fan'] for i in self.all_processed_item]
|
202 |
+
self.get_all_dict['fan'] = np.array(fan_fill)
|
203 |
+
|
204 |
+
find_item_template_fan={
|
205 |
+
'name': 'fan',
|
206 |
+
'exts': ['npy'],
|
207 |
+
'files_list': [],
|
208 |
+
'dir': self.config.fan_npy_folder,
|
209 |
+
'judge_prepare_data_func': judge_prepare_data_func_fan,
|
210 |
+
'run_prepare_data_func': run_prepare_data_func_fan,
|
211 |
+
'post_prepare_data_func': post_prepare_data_func_fan,
|
212 |
+
'match_item_func': match_item_func_fan,
|
213 |
+
'read_item_func': read_item_func_fan,
|
214 |
+
'post_read_func': post_read_func_fan,
|
215 |
+
}
|
216 |
+
self.find_item_template_list['fan']=find_item_template_fan
|
217 |
+
|
218 |
+
# openpose -----------------------------------
|
219 |
+
if True:
|
220 |
+
def judge_prepare_data_func_op():
|
221 |
+
return is_empty_dir(self.config.keyp_folder)
|
222 |
+
|
223 |
+
def run_prepare_data_func_op():
|
224 |
+
with default_timers['run_openpose']:
|
225 |
+
run_openpose(
|
226 |
+
low_res=self.config.low_res,
|
227 |
+
openpose_root_path=self.config.openpose_root_path,
|
228 |
+
openpose_bin_path=self.config.openpose_bin_path,
|
229 |
+
img_dir=self.config.img_folder,
|
230 |
+
out_dir=self.config.keyp_folder,
|
231 |
+
img_out=self.config.keyp_folder_vis if self.config.saveVis else None,
|
232 |
+
)
|
233 |
+
|
234 |
+
def post_prepare_data_func_op():
|
235 |
+
if (self.config.saveVis and
|
236 |
+
not Path(self.config.keyp_folder_v).exists() and
|
237 |
+
not is_empty_dir(self.config.keyp_folder_vis)
|
238 |
+
):
|
239 |
+
logger.info(f'converting {self.config.keyp_folder_vis} to {self.config.keyp_folder_v}')
|
240 |
+
images_to_video(
|
241 |
+
input_folder=self.config.keyp_folder_vis,
|
242 |
+
output_path=self.config.keyp_folder_v,
|
243 |
+
img_format = None,
|
244 |
+
fps=30,
|
245 |
+
)
|
246 |
+
|
247 |
+
def match_item_func_op(keypoints):
|
248 |
+
def get_bbox_center_func(keypoint):
|
249 |
+
body_j = keypoint[0:25, :]
|
250 |
+
head_j = body_j[0]
|
251 |
+
x, y, _ = head_j
|
252 |
+
cur_center = [x, y]
|
253 |
+
return None, cur_center
|
254 |
+
return match_by_bbox(
|
255 |
+
keypoints,
|
256 |
+
get_bbox_center_func)
|
257 |
+
|
258 |
+
def read_item_func_op(file_path):
|
259 |
+
keypoints, _ = self.read_keypoints(keypoint_fn=file_path)
|
260 |
+
return keypoints
|
261 |
+
|
262 |
+
find_item_template_op={
|
263 |
+
'name': 'op',
|
264 |
+
'exts': ['json'],
|
265 |
+
'dir':self.config.keyp_folder,
|
266 |
+
'judge_prepare_data_func': judge_prepare_data_func_op,
|
267 |
+
'run_prepare_data_func': run_prepare_data_func_op,
|
268 |
+
'post_prepare_data_func': post_prepare_data_func_op,
|
269 |
+
'match_item_func': match_item_func_op,
|
270 |
+
'read_item_func': read_item_func_op,
|
271 |
+
}
|
272 |
+
self.find_item_template_list['op']=find_item_template_op
|
273 |
+
|
274 |
+
# deca -----------------------------------
|
275 |
+
if True:
|
276 |
+
def judge_prepare_data_func_deca():
|
277 |
+
return ext_files_num_in_dir(
|
278 |
+
self.config.deca_mat_folder,
|
279 |
+
exts=['*.pkl', '*.pkl.empty']
|
280 |
+
) < ref_len
|
281 |
+
|
282 |
+
def run_prepare_data_func_deca():
|
283 |
+
with default_timers['api_multi_deca']:
|
284 |
+
logger.info(f'running api_multi_deca')
|
285 |
+
api_multi_deca(
|
286 |
+
inputpath=self.config.img_folder,
|
287 |
+
savefolder=self.config.deca_mat_folder,
|
288 |
+
visfolder=self.config.deca_mat_folder_vis,
|
289 |
+
saveVis=self.config.saveVis,
|
290 |
+
face_detector=self.config.face_detector
|
291 |
+
)
|
292 |
+
|
293 |
+
def post_prepare_data_func_deca():
|
294 |
+
if (self.config.saveVis and
|
295 |
+
not Path(self.config.deca_mat_folder_v).exists() and
|
296 |
+
not is_empty_dir(self.config.deca_mat_folder_vis)
|
297 |
+
):
|
298 |
+
logger.info(f'converting {self.config.deca_mat_folder_vis} to {self.config.deca_mat_folder_v}')
|
299 |
+
images_to_video(
|
300 |
+
input_folder=self.config.deca_mat_folder_vis,
|
301 |
+
output_path=self.config.deca_mat_folder_v,
|
302 |
+
img_format = None,
|
303 |
+
fps=30,
|
304 |
+
)
|
305 |
+
|
306 |
+
def match_item_func_deca(deca_ret_list):
|
307 |
+
def get_bbox_center_func(input_ret):
|
308 |
+
bbox = input_ret['face_bbox']
|
309 |
+
xmin, ymin, xmax, ymax = [int(i) for i in bbox]
|
310 |
+
cur_center = [int((xmin+xmax)/2), int((ymin+ymax)/2)]
|
311 |
+
return bbox, cur_center
|
312 |
+
return match_by_bbox(
|
313 |
+
deca_ret_list,
|
314 |
+
get_bbox_center_func)
|
315 |
+
|
316 |
+
def read_item_func_deca(deca):
|
317 |
+
return read_deca(deca)
|
318 |
+
|
319 |
+
find_item_template_deca={
|
320 |
+
'name': 'deca',
|
321 |
+
'exts': ['pkl'],
|
322 |
+
'dir':self.config.deca_mat_folder,
|
323 |
+
'judge_prepare_data_func': judge_prepare_data_func_deca,
|
324 |
+
'run_prepare_data_func': run_prepare_data_func_deca,
|
325 |
+
'post_prepare_data_func': post_prepare_data_func_deca,
|
326 |
+
'match_item_func': match_item_func_deca,
|
327 |
+
'read_item_func': read_item_func_deca,
|
328 |
+
}
|
329 |
+
self.find_item_template_list['deca']=find_item_template_deca
|
330 |
+
|
331 |
+
# pixie -----------------------------------
|
332 |
+
if True:
|
333 |
+
def judge_prepare_data_func_pixie():
|
334 |
+
return ext_files_num_in_dir(
|
335 |
+
self.config.pixie_mat_folder,
|
336 |
+
exts=['*.pkl', '*.pkl.empty']) < ref_len
|
337 |
+
|
338 |
+
def run_prepare_data_func_pixie():
|
339 |
+
with default_timers['api_multi_body']:
|
340 |
+
logger.info(r'running pixie')
|
341 |
+
api_multi_body(
|
342 |
+
imgfolder=self.config.img_folder,
|
343 |
+
savefolder=self.config.pixie_mat_folder,
|
344 |
+
visfolder=self.config.pixie_mat_folder_vis,
|
345 |
+
saveVis=self.config.saveVis,
|
346 |
+
rasterizer_type=self.config.rasterizer_type
|
347 |
+
)
|
348 |
+
|
349 |
+
def post_prepare_data_func_pixie():
|
350 |
+
if (self.config.saveVis and
|
351 |
+
not Path(self.config.pixie_mat_folder_v).exists() and
|
352 |
+
not is_empty_dir(self.config.pixie_mat_folder_vis)
|
353 |
+
):
|
354 |
+
logger.info(f'converting {self.config.pixie_mat_folder_vis} to {self.config.pixie_mat_folder_v}')
|
355 |
+
images_to_video(
|
356 |
+
input_folder=self.config.pixie_mat_folder_vis,
|
357 |
+
output_path=self.config.pixie_mat_folder_v,
|
358 |
+
img_format = None,
|
359 |
+
fps=30,
|
360 |
+
)
|
361 |
+
|
362 |
+
def match_item_func_pixie(pixie_ret_list):
|
363 |
+
def get_bbox_center_func(input_ret):
|
364 |
+
bbox = input_ret['face_bbox']
|
365 |
+
xmin, ymin, xmax, ymax = [int(i) for i in bbox]
|
366 |
+
cur_center = [int((xmin+xmax)/2), int((ymin+ymax)/2)]
|
367 |
+
return bbox, cur_center
|
368 |
+
return match_by_bbox(
|
369 |
+
pixie_ret_list,
|
370 |
+
get_bbox_center_func)
|
371 |
+
|
372 |
+
def read_item_func_pixie(pixie):
|
373 |
+
return read_pixie(
|
374 |
+
pixie, self.height, self.width,
|
375 |
+
cvt_hand_func=self.config.cvt_hand_func)
|
376 |
+
|
377 |
+
find_item_template_pixie={
|
378 |
+
'name': 'pixie',
|
379 |
+
'exts': ['pkl'],
|
380 |
+
'dir':self.config.pixie_mat_folder,
|
381 |
+
'judge_prepare_data_func': judge_prepare_data_func_pixie,
|
382 |
+
'run_prepare_data_func': run_prepare_data_func_pixie,
|
383 |
+
'post_prepare_data_func': post_prepare_data_func_pixie,
|
384 |
+
'match_item_func': match_item_func_pixie,
|
385 |
+
'read_item_func': read_item_func_pixie,
|
386 |
+
}
|
387 |
+
self.find_item_template_list['pixie']=find_item_template_pixie
|
388 |
+
|
389 |
+
# mp -----------------------------------
|
390 |
+
if True:
|
391 |
+
def judge_prepare_data_func_mp():
|
392 |
+
return self.config.use_mp_loss and (
|
393 |
+
ext_files_num_in_dir(self.config.mp_npz_folder,
|
394 |
+
exts=['*.npz', '*.npz.empty']) < ref_len
|
395 |
+
)
|
396 |
+
|
397 |
+
def run_prepare_data_func_mp():
|
398 |
+
if self.config.use_mp_loss:
|
399 |
+
with default_timers['face_detector']:
|
400 |
+
logger.info(f'running face detection')
|
401 |
+
if self.__dict__.get('face_detector',None) is None:
|
402 |
+
self.face_detector = FaceDetector()
|
403 |
+
|
404 |
+
self.face_detector.predict_batch(
|
405 |
+
img_folder=self.config.img_folder,
|
406 |
+
savefolder=self.config.mp_npz_folder,
|
407 |
+
visfolder=self.config.mp_npz_folder_vis,
|
408 |
+
saveVis=self.config.saveVis
|
409 |
+
)
|
410 |
+
|
411 |
+
def post_prepare_data_func_mp():
|
412 |
+
if (self.config.saveVis and
|
413 |
+
not Path(self.config.mp_npz_folder_v).exists() and
|
414 |
+
not is_empty_dir(self.config.mp_npz_folder_vis)
|
415 |
+
):
|
416 |
+
logger.info(f'converting {self.config.mp_npz_folder_vis} to {self.config.mp_npz_folder_v}')
|
417 |
+
images_to_video(
|
418 |
+
input_folder=self.config.mp_npz_folder_vis,
|
419 |
+
output_path=self.config.mp_npz_folder_v,
|
420 |
+
img_format = None,
|
421 |
+
fps=30,
|
422 |
+
)
|
423 |
+
|
424 |
+
def match_item_func_mp(mp_ret_list):
|
425 |
+
def get_bbox_center_func(input_ret):
|
426 |
+
face_kpt = input_ret['lmk2d']
|
427 |
+
bbox = lmk2d_to_bbox(face_kpt, self.height, self.width)
|
428 |
+
xmin, ymin, xmax, ymax = [int(i) for i in bbox]
|
429 |
+
cur_center = [int((xmin+xmax)/2), int((ymin+ymax)/2)]
|
430 |
+
return bbox, cur_center
|
431 |
+
return match_by_bbox(
|
432 |
+
mp_ret_list,
|
433 |
+
get_bbox_center_func)
|
434 |
+
|
435 |
+
def read_item_func_mp(mp):
|
436 |
+
return read_mp(mp, self.height, self.width)
|
437 |
+
|
438 |
+
find_item_template_mp={
|
439 |
+
'name': 'mp',
|
440 |
+
'exts': ['npz'],
|
441 |
+
'dir':self.config.mp_npz_folder,
|
442 |
+
'judge_prepare_data_func': judge_prepare_data_func_mp,
|
443 |
+
'run_prepare_data_func': run_prepare_data_func_mp,
|
444 |
+
'post_prepare_data_func': post_prepare_data_func_mp,
|
445 |
+
'match_item_func': match_item_func_mp,
|
446 |
+
'read_item_func': read_item_func_mp,
|
447 |
+
}
|
448 |
+
self.find_item_template_list['mp']=find_item_template_mp
|
449 |
+
|
450 |
+
# seg -----------------------------------
|
451 |
+
if True:
|
452 |
+
def judge_prepare_data_func_seg():
|
453 |
+
return self.config.use_silhouette_loss and (
|
454 |
+
ext_files_num_in_dir(self.config.seg_img_folder,
|
455 |
+
exts=['*.jpg', '*.png']) < ref_len
|
456 |
+
)
|
457 |
+
|
458 |
+
def run_prepare_data_func_seg():
|
459 |
+
if self.config.use_silhouette_loss:
|
460 |
+
with default_timers['deeplab_seg']:
|
461 |
+
logger.info(f'running deeplab segmentation')
|
462 |
+
if not hasattr(self,'deeplab_seg'):
|
463 |
+
self.deeplab_seg=deeplab_seg()
|
464 |
+
|
465 |
+
self.deeplab_seg.predict_batch(
|
466 |
+
img_folder=self.config.img_folder,
|
467 |
+
savefolder=self.config.seg_img_folder,
|
468 |
+
saveVis=True
|
469 |
+
)
|
470 |
+
|
471 |
+
def post_prepare_data_func_seg():
|
472 |
+
if (self.config.saveVis and
|
473 |
+
is_empty_dir(self.config.seg_img_folder_vis)
|
474 |
+
):
|
475 |
+
if not hasattr(self,'deeplab_seg'):
|
476 |
+
self.deeplab_seg=deeplab_seg()
|
477 |
+
logger.info(f'running deeplab segmentation visualization')
|
478 |
+
self.deeplab_seg.predict_batch(
|
479 |
+
img_folder=self.config.img_folder,
|
480 |
+
savefolder=self.config.seg_img_folder_vis,
|
481 |
+
saveVis=True,
|
482 |
+
save_mode='mask',
|
483 |
+
)
|
484 |
+
|
485 |
+
if (self.config.saveVis and
|
486 |
+
not Path(self.config.seg_img_folder_v).exists() and
|
487 |
+
not is_empty_dir(self.config.seg_img_folder)
|
488 |
+
):
|
489 |
+
logger.info(f'convert {self.config.seg_img_folder} to video')
|
490 |
+
images_to_video(
|
491 |
+
input_folder=self.config.seg_img_folder_vis,
|
492 |
+
output_path=self.config.seg_img_folder_v,
|
493 |
+
img_format = None,
|
494 |
+
fps=30,
|
495 |
+
)
|
496 |
+
|
497 |
+
def match_item_func_seg(seg):
|
498 |
+
return seg
|
499 |
+
|
500 |
+
def read_item_func_seg(seg):
|
501 |
+
return cv2.imread(seg,cv2.IMREAD_GRAYSCALE)/255
|
502 |
+
|
503 |
+
find_item_template_seg={
|
504 |
+
'name': 'seg',
|
505 |
+
'exts': ['jpg'],
|
506 |
+
'dir':self.config.seg_img_folder,
|
507 |
+
'judge_prepare_data_func': judge_prepare_data_func_seg,
|
508 |
+
'run_prepare_data_func': run_prepare_data_func_seg,
|
509 |
+
'post_prepare_data_func': post_prepare_data_func_seg,
|
510 |
+
'match_item_func': match_item_func_seg,
|
511 |
+
'read_item_func': read_item_func_seg,
|
512 |
+
}
|
513 |
+
self.find_item_template_list['seg']=find_item_template_seg
|
514 |
+
|
515 |
+
# pymaf -----------------------------------
|
516 |
+
if True:
|
517 |
+
def judge_prepare_data_func_pymaf():
|
518 |
+
return self.config.use_pymaf_hand
|
519 |
+
|
520 |
+
def run_prepare_data_func_pymaf():
|
521 |
+
if self.config.use_pymaf_hand:
|
522 |
+
if (not Path(self.config.pymaf_pkl_path).exists()):
|
523 |
+
logger.info('Running pymaf-x')
|
524 |
+
run_pymafx(
|
525 |
+
image_folder=self.config.img_folder,
|
526 |
+
output_folder=self.config.pymaf_pkl_folder,
|
527 |
+
no_render=not self.config.saveVis,
|
528 |
+
pymaf_code_dir=os.path.join(
|
529 |
+
os.path.dirname(os.path.abspath(__file__)),
|
530 |
+
'../../modules/PyMAF')
|
531 |
+
)
|
532 |
+
|
533 |
+
def post_prepare_data_func_pymaf():
|
534 |
+
if (
|
535 |
+
self.config.saveVis and
|
536 |
+
not Path(self.config.pymaf_pkl_folder_v).exists() and
|
537 |
+
not is_empty_dir(self.config.pymaf_folder_vis)
|
538 |
+
):
|
539 |
+
images_to_video(
|
540 |
+
input_folder=self.config.pymaf_folder_vis,
|
541 |
+
output_path=self.config.pymaf_pkl_folder_v,
|
542 |
+
img_format = None,
|
543 |
+
fps=30,
|
544 |
+
)
|
545 |
+
|
546 |
+
def match_item_func_pymaf():
|
547 |
+
pass
|
548 |
+
|
549 |
+
def read_item_func_pymaf():
|
550 |
+
pass
|
551 |
+
|
552 |
+
def post_read_func_pymaf(template):
|
553 |
+
|
554 |
+
if (
|
555 |
+
self.config.use_pymaf_hand and
|
556 |
+
Path(self.config.pymaf_pkl_path).exists()
|
557 |
+
):
|
558 |
+
logger.info(f'load pymaf file %s' % self.config.pymaf_pkl_path)
|
559 |
+
pymaf_out_data=joblib.load(self.config.pymaf_pkl_path)
|
560 |
+
smplx_params=pymaf_out_data['smplx_params']
|
561 |
+
joints2d=pymaf_out_data['joints2d']
|
562 |
+
nose_j_list=[i[0] for i in joints2d]
|
563 |
+
|
564 |
+
self.nose_j_list=nose_j_list
|
565 |
+
self.smplx_params=smplx_params
|
566 |
+
|
567 |
+
matched_smplx_params=[]
|
568 |
+
for idx,nose_j in enumerate(self.nose_j_list):
|
569 |
+
dist1 = np.abs(nose_j[0]-self.corr_center_pymaf[0])/self.width
|
570 |
+
dist2 = np.abs(nose_j[1]-self.corr_center_pymaf[1])/self.height
|
571 |
+
if dist1 < self.match_thres and dist2 < self.match_thres:
|
572 |
+
self.corr_center_pymaf=nose_j
|
573 |
+
matched_smplx_params.append(self.smplx_params[idx])
|
574 |
+
|
575 |
+
lhand_list=[e['left_hand_pose'] for e in matched_smplx_params]#(1,15,3,3)
|
576 |
+
rhand_list=[e['right_hand_pose'] for e in matched_smplx_params]
|
577 |
+
lhand_rot=torch.cat(lhand_list,dim=0)
|
578 |
+
rhand_rot=torch.cat(rhand_list,dim=0)
|
579 |
+
|
580 |
+
from pytorch3d.transforms import matrix_to_axis_angle
|
581 |
+
lhand_axis=matrix_to_axis_angle(lhand_rot)
|
582 |
+
rhand_axis=matrix_to_axis_angle(rhand_rot)
|
583 |
+
|
584 |
+
cvt_hand_func=self.config.cvt_hand_func
|
585 |
+
lhand_pca, rhand_pca=cvt_hand_func(lhand_axis,rhand_axis)
|
586 |
+
|
587 |
+
self.pymaf_lhand_pca=lhand_pca
|
588 |
+
self.pymaf_rhand_pca=rhand_pca
|
589 |
+
|
590 |
+
if 1:
|
591 |
+
logger.info(f'matched pymaf_lhand_pca shape {self.pymaf_lhand_pca.shape}')
|
592 |
+
self.pymaf_lhand_pca=self.pymaf_lhand_pca[:self.batch_size,:]
|
593 |
+
self.pymaf_rhand_pca=self.pymaf_rhand_pca[:self.batch_size,:]
|
594 |
+
|
595 |
+
if self.pymaf_lhand_pca.shape[0]==self.batch_size:
|
596 |
+
logger.warning(f'replaced r&l hand with pymaf')
|
597 |
+
self.get_all_dict['init_data']['lhand']=self.pymaf_lhand_pca
|
598 |
+
self.get_all_dict['init_data']['rhand']=self.pymaf_rhand_pca
|
599 |
+
|
600 |
+
|
601 |
+
find_item_template_pymaf={
|
602 |
+
'name': 'pymaf',
|
603 |
+
'exts': ['xxx'],
|
604 |
+
'dir':self.config.pymaf_pkl_folder,
|
605 |
+
'judge_prepare_data_func': judge_prepare_data_func_pymaf,
|
606 |
+
'run_prepare_data_func': run_prepare_data_func_pymaf,
|
607 |
+
'post_prepare_data_func': post_prepare_data_func_pymaf,
|
608 |
+
'match_item_func': match_item_func_pymaf,
|
609 |
+
'read_item_func': read_item_func_pymaf,
|
610 |
+
'post_read_func': post_read_func_pymaf,
|
611 |
+
}
|
612 |
+
self.find_item_template_list['pymaf']=find_item_template_pymaf
|
613 |
+
|
614 |
+
|
615 |
+
def initialize(self):
|
616 |
+
for template in self.find_item_template_list.values():
|
617 |
+
|
618 |
+
if template['judge_prepare_data_func']():
|
619 |
+
template['run_prepare_data_func']()
|
620 |
+
template['post_prepare_data_func']()
|
621 |
+
template['files_list'] = glob_exts_in_path(
|
622 |
+
template['dir'], img_ext = template['exts'])
|
623 |
+
|
624 |
+
for file_path in template['files_list']:
|
625 |
+
fn_id, _ = osp.splitext(osp.split(file_path)[1])
|
626 |
+
fn_id = int(fn_id.split('_')[0].split('.')[0])
|
627 |
+
self.int_id_to_item[fn_id][template['name']] = file_path
|
628 |
+
|
629 |
+
self.all_item_list = [[k, v] for k, v in self.int_id_to_item.items() if v.get('img') is not None]
|
630 |
+
self.all_item_list = sorted(self.all_item_list, key=lambda x: x[0])
|
631 |
+
self.all_item_list = [i[1] for i in self.all_item_list]
|
632 |
+
|
633 |
+
assert(Path(self.all_item_list[0]['img']).exists())
|
634 |
+
self.height, self.width, _ = cv2.imread(self.all_item_list[0]['img']).shape
|
635 |
+
|
636 |
+
if self.batch_size == -1:
|
637 |
+
self.batch_size = len(self.all_item_list)
|
638 |
+
else:
|
639 |
+
self.all_item_list = self.all_item_list[:self.batch_size]
|
640 |
+
|
641 |
+
if self.batch_size>300:
|
642 |
+
self.batch_size=300
|
643 |
+
|
644 |
+
def __len__(self):
|
645 |
+
return self.batch_size
|
646 |
+
|
647 |
+
def __iter__(self):
|
648 |
+
return self
|
649 |
+
|
650 |
+
def __next__(self):
|
651 |
+
if self.cnt >= self.__len__():
|
652 |
+
raise StopIteration
|
653 |
+
|
654 |
+
assert(self.cnt >= 0)
|
655 |
+
self.cnt += 1
|
656 |
+
|
657 |
+
return self.__getitem__(self.cnt-1)
|
658 |
+
|
659 |
+
def __getitem__(self, idx):
|
660 |
+
|
661 |
+
img_path = self.all_item_list[idx]['img']
|
662 |
+
|
663 |
+
# get_bbox_by_emb_and_deca
|
664 |
+
assert(Path(img_path).exists())
|
665 |
+
img = cv2.imread(img_path).astype(np.float32)
|
666 |
+
assert(img is not None)
|
667 |
+
img_fn, self.img_ext = osp.splitext(osp.split(img_path)[1])
|
668 |
+
self.height, self.width, _ = img.shape
|
669 |
+
# bgr,hw3,uint8
|
670 |
+
|
671 |
+
if self.person_face_emb is not None:
|
672 |
+
if self.correspond_center is None:
|
673 |
+
self.correspond_center, self.correspond_bbox = match_faces(
|
674 |
+
img, self.face_ider,
|
675 |
+
self.person_face_emb)
|
676 |
+
if self.corr_center_pymaf is None:
|
677 |
+
self.corr_center_pymaf=self.correspond_center
|
678 |
+
if self.correspond_center is None:
|
679 |
+
logger.warning("correspond_center return None")
|
680 |
+
|
681 |
+
img = img[:, :, ::-1] / 255.0
|
682 |
+
img = img.transpose(2, 0, 1)
|
683 |
+
# c,h,w; rgb 0-1
|
684 |
+
|
685 |
+
correspond_info = {}
|
686 |
+
for item_name,item_path in self.all_item_list[idx].items():
|
687 |
+
item_dict = self.find_item_template_list[item_name]
|
688 |
+
item_con = item_dict['read_item_func'](item_path)
|
689 |
+
correspond_info[item_name] = item_dict['match_item_func'](item_con)
|
690 |
+
|
691 |
+
correspond_info.update(
|
692 |
+
img=img,
|
693 |
+
img_fn=img_fn,
|
694 |
+
img_path=img_path)
|
695 |
+
return EasyDict(correspond_info)
|
696 |
+
|
697 |
+
|
698 |
+
def get_all(self):
|
699 |
+
|
700 |
+
for idx in tqdm(list(range(self.batch_size)),desc='reading raw files'):
|
701 |
+
assert(type(idx) == int)
|
702 |
+
self.all_processed_item.append(
|
703 |
+
self.__getitem__(idx)
|
704 |
+
)
|
705 |
+
|
706 |
+
self.pp = op_post_process(self.all_processed_item,
|
707 |
+
device=self.device,
|
708 |
+
dtype=self.dtype)
|
709 |
+
self.get_all_dict = self.pp.run()
|
710 |
+
|
711 |
+
for template in self.find_item_template_list.values():
|
712 |
+
post_read_func=template.get('post_read_func')
|
713 |
+
if post_read_func:
|
714 |
+
post_read_func(template)
|
715 |
+
|
716 |
+
return self.get_all_dict
|
717 |
+
|
718 |
+
def get_modify_jt_weight(self) -> torch.Tensor:
|
719 |
+
# @return optim_weights: [1,135,1]
|
720 |
+
self.get_joint_weights()
|
721 |
+
|
722 |
+
self.optim_weights[:, 2, :] *= self.config.op_shoulder_conf_weight # shoulder
|
723 |
+
self.optim_weights[:, 5, :] *= self.config.op_shoulder_conf_weight # shoulder
|
724 |
+
self.optim_weights[:, 8, :] *= self.config.op_root_conf_weight # root
|
725 |
+
|
726 |
+
if 0:
|
727 |
+
self.optim_weights[:, :25] = 1 # body
|
728 |
+
self.optim_weights[:, 25:67] = 2 # hand
|
729 |
+
self.optim_weights[:, 67:] = 0 # face
|
730 |
+
|
731 |
+
# print(self.optim_weights)
|
732 |
+
return self.optim_weights
|
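For orientation, here is a minimal usage sketch of the class above. It is not part of the diff; `cfg`, `face_ider`, and `person_emb` are placeholders for a config object carrying the fields referenced in the code (img_folder, saveVis, the various output folders), a face identifier instance, and a reference face embedding.

# Hypothetical driver; every name here is an assumption inferred from the
# attributes op_dataset reads above, not an API documented elsewhere.
dataset = op_dataset(
    config=cfg,
    device='cpu',
    batch_size=-1,               # -1 means "use every frame found in cfg.img_folder"
    face_ider=face_ider,         # e.g. an insightface_ider / arcface_ider instance
    person_face_emb=person_emb,  # normed embedding of the target person, or None
)
dataset.initialize()             # runs the per-modality prepare funcs when outputs are missing
data = dataset.get_all()         # {'init_data': {...}, 'gt_data': {...}} as tensors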
SHOW/SHOW/datasets/op_post_process.py
ADDED
@@ -0,0 +1,212 @@
from scipy.io import savemat, loadmat
from collections import namedtuple
from collections import defaultdict
import numpy as np
import json
import torch
import glob
import cv2
import os.path as osp
import os
from pathlib import Path
from .pre_dataset import *
from typing import Union
from functools import reduce, partial
from ..utils import cvt_dict_to_tensor
from loguru import logger


class op_post_process(object):
    def __init__(self, all_processed_item, device, dtype):
        self.all_processed_item = all_processed_item
        self.device = device
        self.dtype = dtype

    def run(self):
        self.parse_batch()
        self.merge_list_to_tensor()
        return self.parse_data

    def check_valid(self):
        self.valid_dict = defaultdict(list)

        for out in self.all_processed_item:
            for key in ['pixie', 'mp', 'op', 'deca']:
                self.valid_dict[key].append(
                    1 if out.get(key, None) is not None else 0)

        for key, val in self.valid_dict.items():
            logger.info(f'{key}:{val}')

    def merge_list_to_tensor(self,):

        self.exp = np.stack(self.exp_list)  # (bs, 50)
        self.pose = np.stack(self.pose_list)  # (bs, 21, 3)
        self.jaw = np.stack(self.jaw_list)  # (batch_size, 3)

        self.global_orient = np.stack(
            self.global_orient_list)  # (batch_size, 3)
        self.cam_transl = np.stack(self.cam_transl_list)  # (batch_size, 3)
        self.rhand = np.concatenate(
            self.rhand_list, axis=0)  # (batch_size, 12)
        self.lhand = np.concatenate(
            self.lhand_list, axis=0)  # (batch_size, 12)

        for idx in range(len(self.all_processed_item)):
            if self.all_processed_item[idx].get('pixie', None) is not None:
                self.betas = self.all_processed_item[idx]['pixie']['shape']  # (200,)
                break

        if not hasattr(self, 'betas'):
            self.betas = np.zeros(200)

        self.op_kpts = np.stack(self.op_kpts_list, axis=0)  # (bs, 135, 3)
        self.mp_kpts = np.stack(self.mp_kpts_list, axis=0)  # (bs, 478, 2)
        self.deca_kpts = np.concatenate(
            self.deca_kpts_list, axis=0)  # (bs, 68, 2)

        self.op_valid_flag = np.array(self.valid_dict['op'])  # (bs)
        self.mp_valid_flag = np.array(self.valid_dict['mp'])  # (bs)
        self.deca_valid_flag = np.array(self.valid_dict['deca'])  # (bs)
        # op_valid_flag=torch.tensor(op_valid_flag,device=self.device).long()

        batch_size = self.exp.shape[0]
        self.seg_stack = np.stack(self.seg_list)

        self.exp = np.concatenate([(self.exp), np.zeros(
            (batch_size, 50))], axis=-1)  # torch.Size([bs, 100])
        self.betas = np.concatenate([(self.betas), np.zeros(100)])[
            None, ...]  # torch.Size([1, 300])

        self.mica_head_transl = np.zeros((1, 3))
        self.leye_pose = np.zeros((batch_size, 3))
        self.reye_pose = np.zeros((batch_size, 3))
        self.transl = np.zeros((batch_size, 3))

        ret_dict = dict(
            init_data=dict(
                betas=self.betas,
                exp=self.exp,
                jaw=self.jaw,
                rhand=self.rhand,
                lhand=self.lhand,
                pose=self.pose,
                global_orient=self.global_orient,
                cam_transl=self.cam_transl,
                mica_head_transl=self.mica_head_transl,
                leye_pose=self.leye_pose,
                reye_pose=self.reye_pose,
                transl=self.transl
            ),
            gt_data=dict(
                op_kpts=self.op_kpts,
                mp_kpts=self.mp_kpts,
                deca_kpts=self.deca_kpts,
                op_valid_flag=self.op_valid_flag,
                mp_valid_flag=self.mp_valid_flag,
                deca_valid_flag=self.deca_valid_flag,
                seg_stack=self.seg_stack
            )
        )
        self.parse_data = cvt_dict_to_tensor(ret_dict, self.device, self.dtype)

        # expression (bs, 50)
        # body_pose_axis (bs, 21, 3)
        # jaw_pose (bs,3)
        # global_orient (bs,3)
        # transl (bs,3)
        # left_hand_pose (bs,12)
        # right_hand_pose (bs,12)
        # betas (200)

        # jaw: (B,3)
        # body: (B,21,3)
        # hands: 2*(B,15,3)
        return self.parse_data

    def parse_batch(self):

        self.global_orient_list = []
        self.cam_transl_list = []
        self.jaw_list = []
        self.exp_list = []
        self.lhand_list = []
        self.rhand_list = []
        self.pose_list = []
        self.mp_kpts_list = []
        self.op_kpts_list = []
        self.deca_kpts_list = []
        self.seg_list = []
        last_nonempty = dict(
            pixie=None,
            deca=None,
        )
        for i in range(len(self.all_processed_item)):
            item_pos = len(self.all_processed_item)-1-i
            item = self.all_processed_item[item_pos]
            for key in last_nonempty.keys():
                out_val = item.get(key, None)
                if out_val is not None:
                    last_nonempty[key] = out_val
                if out_val is None:
                    self.all_processed_item[item_pos][key] = last_nonempty[key]

        for i in range(len(self.all_processed_item)):
            item_pos = i
            item = self.all_processed_item[item_pos]
            for key in last_nonempty.keys():
                out_val = item.get(key, None)
                if out_val is not None:
                    last_nonempty[key] = out_val
                if out_val is None:
                    self.all_processed_item[item_pos][key] = last_nonempty[key]

        logger.info(f'after filling init datas: ')
        self.check_valid()

        for out in self.all_processed_item:
            #########################
            out_pixie = out.get('pixie', None)
            if out_pixie is None:
                out_pixie = {
                    'body_pose_63': np.zeros((21, 3)),
                    'left_hand_pose': np.zeros((1, 12)),
                    'right_hand_pose': np.zeros((1, 12)),
                    'global_orient': np.zeros((1, 3)),
                    'transl': np.zeros(3),
                }
            self.pose_list.append(out_pixie['body_pose_63'])
            self.lhand_list.append(out_pixie['left_hand_pose'])
            self.rhand_list.append(out_pixie['right_hand_pose'])
            self.cam_transl_list.append(out_pixie['transl'])
            self.global_orient_list.append(out_pixie['global_orient'])

            #########################
            out_deca = out.get('deca', None)
            if out_deca is None:
                out_deca = {
                    'exp': np.zeros(50),
                    'jaw': np.zeros(3),
                    'lmk2d': np.zeros((1, 68, 2)),
                }
            self.exp_list.append(out_deca['exp'])
            self.jaw_list.append(out_deca['jaw'])
            self.deca_kpts_list.append(out_deca['lmk2d'])

            #########################
            out_mp = out.get('mp', None)
            if out_mp is None:
                out_mp = {
                    'lmk2d': np.zeros((478, 2))
                }
            self.mp_kpts_list.append(out_mp['lmk2d'])

            #########################
            out_op = out.get('op', None)
            if out_op is None:
                out_op = np.zeros((135, 3))
            self.op_kpts_list.append(out_op)

            #########################
            out_seg = out.get('seg', None)
            self.seg_list.append(out_seg)
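A rough sketch of how this post-processor is consumed. The `items` list stands in for the `all_processed_item` built by `op_dataset.get_all`; the shape comments follow those in `merge_list_to_tensor` above.

# Sketch only; `items` is assumed to be a list of per-frame EasyDicts as
# produced by op_dataset.__getitem__.
pp = op_post_process(items, device='cpu', dtype=torch.float32)
data = pp.run()
init = data['init_data']   # betas, exp, jaw, pose, lhand/rhand, global_orient, cam_transl, ...
gt = data['gt_data']       # op_kpts (bs,135,3), mp_kpts (bs,478,2), deca_kpts (bs,68,2), valid flags, seg_stack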
SHOW/SHOW/datasets/pre_dataset.py
ADDED
@@ -0,0 +1,111 @@
from scipy.io import savemat, loadmat
from collections import namedtuple
import numpy as np
import json
import torch
import glob
import cv2
import os.path as osp
import os
import pickle
from ..image import lmk2d_to_bbox
import mmcv

deca_exp_to_smplx_X = np.load(
    osp.join(os.path.dirname(__file__),
             '../../../data/flame2020to2019_exp_trafo.npy')
)


def deca_exp_to_smplx(e_deca):
    e_deca = np.concatenate([e_deca, np.zeros(50)])
    e_smplx = deca_exp_to_smplx_X.dot(e_deca)
    e_smplx = e_smplx[:50]
    return e_smplx


def read_mp(mp_npz_file, height, width):
    try:
        mp_npz = np.load(mp_npz_file, allow_pickle=True)
    except:
        import traceback
        traceback.print_exc()
        return None

    mp_npz = list(mp_npz.values())
    return_list = []
    for ret in mp_npz:
        # (478,2)
        return_list.append({
            'lmk2d': ret[0],
        })

    return return_list


def read_pixie(pixie_mat_file, height, width, cvt_hand_func=None):
    pixie_ret_list = mmcv.load(pixie_mat_file)

    assert cvt_hand_func, 'cvt_hand_func must be set'

    return_list = []
    for ret in pixie_ret_list:
        for key, val in ret.items():
            if isinstance(val, np.ndarray) and val.shape[0] == 1:
                ret[key] = ret[key][0]

        face_bbox = lmk2d_to_bbox(ret['face_kpt'], height, width)

        if 1:
            lhand, rhand = cvt_hand_func(
                ret['left_hand_pose'],
                ret['right_hand_pose'],
            )
            ret['left_hand_pose'] = lhand
            ret['right_hand_pose'] = rhand

        ret['face_bbox'] = face_bbox

        return_list.append(ret)

    return return_list


def read_deca(deca_mat_file):
    assert(osp.exists(deca_mat_file))

    deca_ret_list = mmcv.load(deca_mat_file)

    assert(deca_ret_list != [])
    return_list = []

    for ret in deca_ret_list:
        for key, val in ret.items():
            if isinstance(val, np.ndarray) and val.shape[0] == 1:
                ret[key] = ret[key][0]

        deca_lmk = torch.tensor(ret['landmarks2d'])
        org2deca_tform = torch.tensor(ret['tform'])

        # deca_lmk=deca_lmk[:, :, :2]
        deca_lmk = deca_lmk[None, ...][:, :, :2]
        deca_lmk[:, :, 0] = (deca_lmk[:, :, 0] + 1) * 0.5 * 224
        deca_lmk[:, :, 1] = (deca_lmk[:, :, 1] + 1) * 0.5 * 224

        tform_T = torch.inverse(org2deca_tform[None, ...]).transpose(1, 2)
        bs, n_points, _ = deca_lmk.shape
        tmp_one = torch.ones(
            [bs, n_points, 1], device=deca_lmk.device, dtype=deca_lmk.dtype)
        deca_lmk = torch.cat([deca_lmk, tmp_one], dim=-1)
        org_deca_lmk = torch.bmm(deca_lmk, tform_T)[:, :, :2]

        smplx_exp = deca_exp_to_smplx(ret['expression_params'])

        return_list.append({
            'face_bbox': ret['bbox'],  # array([774., 177., 969., 372.])
            'lmk2d': org_deca_lmk,  # torch.Size([1, 68, 2])
            'exp': smplx_exp,  # shape:(50,)
            'jaw': ret['pose_params'][3:],
        })

    return return_list
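As a quick illustration of the expression transfer above (a sketch only; the zero vector stands in for a real DECA expression, and the transfer matrix is assumed to be square so that slicing the first 50 coefficients is valid):

# deca_exp_to_smplx pads the 50-dim DECA/FLAME2020 expression to 100 dims,
# applies the loaded flame2020to2019 transfer matrix, and keeps the first
# 50 coefficients for use as SMPL-X expression parameters.
e_deca = np.zeros(50)              # placeholder expression vector
e_smplx = deca_exp_to_smplx(e_deca)
print(e_smplx.shape)               # expected: (50,)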
SHOW/SHOW/datasets/pre_runner.py
ADDED
@@ -0,0 +1,108 @@
import os
import sys
from loguru import logger
import subprocess
import torch

class path_enter(object):
    def __init__(self, target_path=None):
        self.origin_path = None
        self.target_path = target_path

    def __enter__(self):
        if sys.path[0] != self.target_path:
            sys.path.insert(
                0, self.target_path
            )

        if self.target_path:
            self.origin_path = os.getcwd()
            os.chdir(self.target_path)
            logger.info(f'entered: {self.target_path}; origin_path: {self.origin_path}')

    def __exit__(self, exc_type, exc_value, trace):
        if self.origin_path:
            os.chdir(self.origin_path)
            logger.info(f'exit to origin_path: {self.origin_path}')


def run_smplifyx_org(
    image_folder,
    output_folder,
    smplifyx_code_dir,
    log_cmds=True,
    **kwargs,
):
    with path_enter(smplifyx_code_dir):
        data_folder = os.path.dirname(image_folder)
        cmds = [
            'python smplifyx/main.py --config cfg_files/fit_smplx.yaml',
            '--data_folder', data_folder,
            '--output_folder', output_folder,
            '--img_folder', 'image',
            '--keyp_folder', 'op',
            '--model_folder ../../../models/smplx_model',
            '--vposer_ckpt ../../../models/vposer_v1_0',
            '--visualize="True"',
            # '--visualize="False"',
        ]
        cmds = [str(i) for i in cmds]
        cmds = ' '.join(cmds)
        if log_cmds:
            logger.info(f'log_cmds: {cmds}')
        subprocess.run(cmds, shell=True)
        logger.info(f'done')


def run_pymafx(
    image_folder,
    output_folder,
    pymaf_code_dir,
    log_cmds=True,
    no_render=True,
):
    with path_enter(pymaf_code_dir):
        cmds = [
            'python apps/demo_smplx.py',
            # 'python -m apps.demo_smplx',
            '--image_folder', f'"{image_folder}"',
            '--output_folder', f'"{output_folder}"',
            '--detection_threshold 0.3',
        ]

        if no_render:
            cmds += ['--no_render']

        cmds += [
            '--pretrained_model data/pretrained_model/PyMAF-X_model_checkpoint.pt',
            '--misc TRAIN.BHF_MODE full_body MODEL.EVAL_MODE True MODEL.PyMAF.HAND_VIS_TH 0.1'
        ]

        cmds = [str(i) for i in cmds]
        cmds = ' '.join(cmds)
        if log_cmds:
            logger.info(f'log_cmds: {cmds}')
        subprocess.run(cmds, shell=True)
        logger.info(f'run_pymafx done')


def run_psfr(
    image_folder,
    image_sup_folder,
    log_cmds=True,
):
    psfr_code_dir = os.path.join(os.path.dirname(__file__), '../../modules/PSFRGAN')
    with path_enter(psfr_code_dir):
        cmds = [
            'python test_enhance_dir_unalign.py',
            '--src_dir', f'"{image_folder}"',
            '--results_dir', f'"{image_sup_folder}"',
        ]
        cmds = [str(i) for i in cmds]
        cmds = ' '.join(cmds)
        if log_cmds:
            logger.info(f'log_cmds: {cmds}')
        subprocess.run(cmds, shell=True)
        logger.info(f'done')
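For clarity, a small sketch of the context manager above; the directory and command are placeholders, not paths shipped with the repo.

# path_enter pushes the target directory onto sys.path and chdir()s into it so
# a module's relative config/checkpoint paths resolve; the original cwd is
# restored on exit.
with path_enter('/path/to/modules/PyMAF'):   # hypothetical path
    subprocess.run('python apps/demo_smplx.py --help', shell=True)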
SHOW/SHOW/detector/__init__.py
ADDED
@@ -0,0 +1,2 @@
from .face_detector import *
from .fan_detector import *
SHOW/SHOW/detector/face_detector.py
ADDED
@@ -0,0 +1,106 @@
import mediapipe as mp
import numpy as np
import PIL
import cv2
from glob import glob
import os
from pathlib import Path
import tqdm
from ..utils.paths import glob_exts_in_path
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_face_mesh = mp.solutions.face_mesh


class FaceDetector:
    def __init__(self, type='google', device='cpu'):
        self.type = type
        self.detector = mp_face_mesh.FaceMesh(
            static_image_mode=False,
            max_num_faces=3,
            refine_landmarks=True,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5)

    def dense_multi_face(self, image):
        # assert(self.type == 'google','FaceDetector => Wrong type for dense detection!')
        results = self.detector.process(image)

        if results.multi_face_landmarks is None:
            return None, None

        lmks_list = []
        for i in results.multi_face_landmarks:
            lmks = i.landmark
            lmks = np.array(list(map(lambda l: np.array([l.x, l.y]), lmks)))
            lmks[:, 0] = lmks[:, 0] * image.shape[1]
            lmks[:, 1] = lmks[:, 1] * image.shape[0]
            lmks_list.append(lmks)

        # lmks_list: (num,478,2)
        return np.array(lmks_list), results

    def predict_batch(self,
                      img_folder,
                      savefolder,
                      visfolder,
                      saveVis=True):
        Path(savefolder).mkdir(exist_ok=True, parents=True)
        Path(visfolder).mkdir(exist_ok=True, parents=True)

        self.imagepath_list = glob_exts_in_path(img_folder, img_ext=['png', 'jpg', 'jpeg'])

        for imagepath in tqdm.tqdm(self.imagepath_list):
            imagename = Path(imagepath).stem
            out_npz_name = os.path.join(savefolder, imagename+'_dense.npz')
            out_img_name = os.path.join(visfolder, imagename+'.jpg')
            self.predict(
                imagepath,
                out_npz_name=out_npz_name,
                out_img_name=out_img_name,
                saveVis=saveVis
            )

    def predict(self, full_file_name, out_npz_name, out_img_name, saveVis):
        pil_im = PIL.Image.open(full_file_name).convert('RGB')
        image = np.array(pil_im)

        lmks_list, results = self.dense_multi_face(image)
        if lmks_list is not None:
            np.savez(out_npz_name, lmks_list)
            if saveVis:
                image = self.draw(image, results)
                cv2.imwrite(out_img_name, image)
        else:
            open(out_npz_name+'.empty', 'a').close()

    def draw(self, image, results):
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        image.flags.writeable = True
        for face_landmarks in results.multi_face_landmarks:
            mp_drawing.draw_landmarks(
                image=image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_TESSELATION,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_tesselation_style())
            mp_drawing.draw_landmarks(
                image=image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_CONTOURS,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_contours_style())
            mp_drawing.draw_landmarks(
                image=image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_IRISES,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_iris_connections_style())
        return image
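A minimal sketch of driving the detector above; the folder names are placeholders.

# Runs MediaPipe face mesh over every image in img_folder; each frame yields a
# <name>_dense.npz holding a (num_faces, 478, 2) landmark array (or an .empty
# marker file when no face is found), plus an optional visualization image.
det = FaceDetector()
det.predict_batch(
    img_folder='demo/image',
    savefolder='demo/mp',
    visfolder='demo/mp_vis',
    saveVis=True,
)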
SHOW/SHOW/detector/fan_detector.py
ADDED
@@ -0,0 +1,62 @@
from pathlib import Path
from tqdm import tqdm
import os
import numpy as np
import SHOW
import cv2
import torch
import torchvision.transforms.functional as F
from PIL import Image


class FAN_Detector(object):
    def __init__(self, device='cuda'):
        if self.__dict__.get('face_detector', None) is None:
            import face_alignment
            self.face_detector = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device=device)

    def predict(self,
                img_folder, fan_npy_folder,
                save_vis, fan_vis_dir):
        if save_vis:
            os.makedirs(fan_vis_dir, exist_ok=True)

        Path(fan_npy_folder).mkdir(exist_ok=True, parents=True)
        imagepath_list = SHOW.glob_exts_in_path(
            img_folder,
            img_ext=['png', 'jpg', 'jpeg'])

        for imagepath in tqdm(imagepath_list):
            imagename = Path(imagepath).stem
            lmk_path = os.path.join(fan_npy_folder, imagename + '.npy')

            if not os.path.exists(lmk_path):

                # cv2_image = cv2.imread(imagepath)
                pil_image = Image.open(imagepath).convert("RGB")
                # orig_width, orig_height = pil_image.size
                image_np = np.array(pil_image)

                lmks = self.face_detector.get_landmarks(image_np)  # list

                if lmks is not None:
                    lmks = np.array(lmks)
                    np.save(lmk_path, lmks)
                else:
                    open(
                        os.path.join(
                            fan_npy_folder,
                            imagename + '.npy.empty'),
                        'a'
                    ).close()

                if save_vis and lmks is not None:
                    image_torch = F.to_tensor(pil_image)
                    ret_img = SHOW.tensor_vis_landmarks(
                        image_torch,
                        torch.from_numpy(lmks[0])
                    )
                    m_img = SHOW.merge_views([[ret_img[0]]])
                    cv2.imwrite(f'{fan_vis_dir}/{imagename}.jpg', m_img)
SHOW/SHOW/detector/pifpaf_detector.py
ADDED
@@ -0,0 +1,53 @@
import openpifpaf
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
import numpy as np
import PIL
import json
import cv2

class pifpaf_detector(object):
    # chk_type:str = 'shufflenetv2k30-wholebody'

    # def __post_init__(self):
    def __init__(self):
        chk_type: str = 'shufflenetv2k30-wholebody'
        self.predictor = openpifpaf.Predictor(checkpoint=chk_type)

    def process(self, full_file_name, out_file_name, out_img_name):
        pil_im = PIL.Image.open(full_file_name).convert('RGB')
        predictions, _, _ = self.predictor.pil_image(pil_im)

        with open(out_file_name, "w") as fp:
            r = [i.json_data() for i in predictions]
            json.dump(r, fp)

        self.save_vis(pil_im, predictions, out_img_name)

    def save_vis(self, pil_im, predictions, out_img_name):
        annotation_painter = openpifpaf.show.AnnotationPainter()
        with openpifpaf.show.image_canvas(pil_im) as ax:
            annotation_painter.annotations(ax, predictions)
            ax.figure.savefig(out_img_name)

    def predict(self, sample_img):
        # sample_img: bgr image array; run the predictor and render the result
        sample_img = cv2.cvtColor(sample_img, cv2.COLOR_BGR2RGB)
        pil_im = PIL.Image.fromarray(sample_img)
        predictions, _, _ = self.predictor.pil_image(pil_im)
        ret = [i.json_data() for i in predictions]
        # self.pose_ret_list.append(ret)

        annotation_painter = openpifpaf.show.AnnotationPainter()
        with openpifpaf.show.image_canvas(pil_im) as ax:
            annotation_painter.annotations(ax, predictions)
            canvas = FigureCanvasAgg(ax.figure)
            canvas.draw()
            buf = canvas.buffer_rgba()
            X = np.asarray(buf)
        X = cv2.cvtColor(X, cv2.COLOR_RGBA2RGB)
        # self.predict_size is expected to be set by the caller before use
        self.pose_ret_img = cv2.resize(X, self.predict_size)
        # PIL.Image.fromarray(X).show()
        # fig.set_tight_layout(True)
        # ax.figure.savefig('./test.jpg')
        # import pdb;pdb.set_trace()
        return ret, self.pose_ret_img
SHOW/SHOW/face_iders/__init__.py
ADDED
@@ -0,0 +1,11 @@
from .builder import build_ider, build_ider2
from .arcface_ider import arcface_ider
from .base import insightface_ider
from .utils import match_faces

__all__ = ['build_ider', 'build_ider2',
           'base', 'arcface_ider']

# from .builder import *
# from .base import *
# from .arcface_ider import *
SHOW/SHOW/face_iders/arcface_ider.py
ADDED
@@ -0,0 +1,122 @@
import os
import sys
import torch
import mmcv
import cv2
import numpy as np
from pathlib import Path
from easydict import EasyDict
from .builder import IDER
from .base import ider_base
from loguru import logger

default_weight_path = os.path.join(os.path.dirname(__file__),
                                   '../../../models/arcface/glink360k_cosface_r100_fp16_0.1.pth')

@IDER.register_module()
class arcface_ider(ider_base):
    def __init__(self,
                 weight=default_weight_path,
                 name='r100', fp16=True,
                 det='fan', threshold=0.45, **kwargs
                 ):

        self.threshold = threshold
        self.det = det

        from modules.arcface_torch.backbones import get_model
        self.net = get_model(name, fp16=fp16)
        self.net.load_state_dict(torch.load(weight))
        self.net.eval()

        if self.det == 'fan':
            import face_alignment
            self.fan = face_alignment.FaceAlignment(
                face_alignment.LandmarksType._2D,
                flip_input=False)
        if self.det == 'mtcnn':
            from facenet_pytorch import MTCNN as mtcnn
            self.mt = mtcnn(keep_all=True)

    @torch.no_grad()
    def get_bbox_mtcnn(self, img):
        # image: 0-255, uint8, rgb, [h, w, 3]
        out = self.mt.detect(img[None, ...])
        # [747.456 94.97711 889.748 282.031 ]a[0]
        if out[0].any():
            return out[0].squeeze(0), 'bbox'
        else:
            logger.warning('img det return None bbox')
            return (None, None)

    @torch.no_grad()
    def get_bbox_fan(self, img):
        # image: 0-255, uint8, rgb, [h, w, 3]
        h, w, _ = img.shape
        lmk_list = self.fan.get_landmarks(img)

        if lmk_list:
            bbox_list = []
            for lmk in lmk_list:
                kpt = lmk.squeeze()
                left = np.min(kpt[:, 0])
                right = np.max(kpt[:, 0])
                top = np.min(kpt[:, 1])
                bottom = np.max(kpt[:, 1])
                bbox = [left, top, right, bottom]
                bbox_list.append(bbox)
            # [[746.0, 140.0, 894.0, 283.0]]
            return bbox_list, 'kpt68'

        logger.warning('img det return None bbox')
        return (None, None)

    @torch.no_grad()
    def get_face_info_from_img(self, img):
        # img: rgb,hw3,uint8
        ret_list = []

        if self.det == 'fan':
            bboxes, _ = self.get_bbox_fan(img)
        if self.det == 'mtcnn':
            bboxes, _ = self.get_bbox_mtcnn(img)

        if bboxes is None or (
            isinstance(bboxes, np.ndarray) and
            not bboxes.any()
        ):
            logger.warning(f'img det return None bbox')
            return None

        crop_im_bs = mmcv.image.imcrop(img, np.array(bboxes))

        for crop_im, bbox in zip(crop_im_bs, bboxes):
            _img = cv2.resize(crop_im, (112, 112))
            _img = np.transpose(_img, (2, 0, 1))
            _img = torch.from_numpy(_img).unsqueeze(0).float()
            _img.div_(255).sub_(0.5).div_(0.5)

            feat = self.net(_img).numpy()[0]
            feat = feat/np.linalg.norm(feat)

            ret_list.append(EasyDict({
                'normed_embedding': feat,
                'crop_im': crop_im,
                'bbox': bbox}))

        return ret_list

    def get(self, img):
        # img: bgr,hw3,uint8
        if isinstance(img, str):
            if Path(img).exists():
                img = cv2.imread(img)
            else:
                logger.info(f'img not exists: {img}')
                return None

        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        face_info = self.get_face_info_from_img(img)

        return face_info
SHOW/SHOW/face_iders/base.py
ADDED
@@ -0,0 +1,38 @@
import os
import sys
import mmcv
import cv2
import torch
import numpy as np
from pathlib import Path
from typing import Optional, Union, NewType
from .builder import IDER


class ider_base(object):
    def get_all_emb(self, im: np.ndarray = None) -> np.ndarray:
        # im:bgr
        faces = self.app.get(im)
        return faces

    def get_face_emb(self, im: np.ndarray = None) -> np.ndarray:
        # im:bgr
        faces = self.app.get(im)
        emb = faces[0].normed_embedding
        return emb

    def cal_emb_sim(self, emb1: np.ndarray = None, emb2: np.ndarray = None) -> np.ndarray:
        return np.dot(emb1, emb2.T)

    def get(self, img):
        return self.app.get(img)


@IDER.register_module()
class insightface_ider(ider_base):
    def __init__(self, threshold=0.6, **kwargs):
        self.threshold = threshold

        from insightface.app import FaceAnalysis
        self.app = FaceAnalysis(providers=['CUDAExecutionProvider'])
        self.app.prepare(ctx_id=-1, det_size=(640, 640))
SHOW/SHOW/face_iders/builder.py
ADDED
@@ -0,0 +1,10 @@
from mmcv.utils import Registry
import mmcv

IDER = Registry('ider')

def build_ider(config):
    return mmcv.build_from_cfg(config, IDER)

def build_ider2(config):
    return IDER.build(config)
SHOW/SHOW/face_iders/utils.py
ADDED
@@ -0,0 +1,23 @@
from loguru import logger


def match_faces(img, face_ider, person_face_emb):
    # img: bgr,hw3,uint8
    faces = face_ider.get(img)
    if faces is None:
        return None, None
    # face_ider: 1.func:get(np_img) --> {2.normed_embedding,3.bbox}
    for face in faces:
        cur_emb = face.normed_embedding
        sim = face_ider.cal_emb_sim(cur_emb, person_face_emb)
        if sim >= face_ider.threshold:
            logger.info(f'found sim:{sim}')
            correspond_bbox = face.bbox
            xmin, ymin, xmax, ymax = correspond_bbox
            correspond_center = [
                int((xmin+xmax)/2),
                int((ymin+ymax)/2)]
            return correspond_center, correspond_bbox
    logger.info(f'not found: {sim}')
    return None, None
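A short sketch of how these helpers fit together; the image paths are placeholders and `insightface_ider` is the identifier defined in base.py above.

import cv2

# Build a reference embedding once, then locate that person in a frame; the
# returned bbox center seeds the bbox-based matching used by op_dataset.
ider = insightface_ider(threshold=0.6)
ref_emb = ider.get_face_emb(cv2.imread('person_ref.jpg'))        # hypothetical reference image
center, bbox = match_faces(cv2.imread('frame_000001.jpg'), ider, ref_emb)
if center is None:
    print('target person not found in this frame')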
SHOW/SHOW/flame/FLAME.py
ADDED
@@ -0,0 +1,283 @@
# -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at [email protected]
# For commercial licensing contact, please contact [email protected]

import pickle

import numpy as np
# Modified from smplx code for FLAME
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch3d.transforms import rotation_6d_to_matrix, matrix_to_rotation_6d

from .lbs import lbs


def to_tensor(array, dtype=torch.float32):
    if 'torch.tensor' not in str(type(array)):
        return torch.tensor(array, dtype=dtype)


def to_np(array, dtype=np.float32):
    if 'scipy.sparse' in str(type(array)):
        array = array.todense()
    return np.array(array, dtype=dtype)


class Struct(object):
    def __init__(self, **kwargs):
        for key, val in kwargs.items():
            setattr(self, key, val)


def rot_mat_to_euler(rot_mats):
    # Calculates rotation matrix to euler angles
    # Careful for extreme cases of euler angles like [0.0, pi, 0.0]

    sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +
                    rot_mats[:, 1, 0] * rot_mats[:, 1, 0])
    return torch.atan2(-rot_mats[:, 2, 0], sy)


class FLAME(nn.Module):
    """
    borrowed from https://github.com/soubhiksanyal/FLAME_PyTorch/blob/master/FLAME.py
    Given FLAME parameters for shape, pose, and expression, this class generates a differentiable FLAME function
    which outputs a mesh and 2D/3D facial landmarks
    """

    def __init__(self, config):
        super(FLAME, self).__init__()
        print("Creating the FLAME Decoder")
        with open(config.flame_geom_path, 'rb') as f:
            ss = pickle.load(f, encoding='latin1')
            flame_model = Struct(**ss)

        self.dtype = torch.float32
        self.register_buffer('faces', to_tensor(to_np(flame_model.f, dtype=np.int64), dtype=torch.long))
        # The vertices of the template model
        self.register_buffer('v_template', to_tensor(to_np(flame_model.v_template), dtype=self.dtype))
        # The shape components and expression
        shapedirs = to_tensor(to_np(flame_model.shapedirs), dtype=self.dtype)
        shapedirs = torch.cat([shapedirs[:, :, :config.num_shape_params], shapedirs[:, :, 300:300 + config.num_exp_params]], 2)
        self.register_buffer('shapedirs', shapedirs)
        # The pose components
        num_pose_basis = flame_model.posedirs.shape[-1]
        posedirs = np.reshape(flame_model.posedirs, [-1, num_pose_basis]).T
        self.register_buffer('posedirs', to_tensor(to_np(posedirs), dtype=self.dtype))
        #
        self.register_buffer('J_regressor', to_tensor(to_np(flame_model.J_regressor), dtype=self.dtype))
        parents = to_tensor(to_np(flame_model.kintree_table[0])).long()
        parents[0] = -1
        self.register_buffer('parents', parents)
        self.register_buffer('lbs_weights', to_tensor(to_np(flame_model.weights), dtype=self.dtype))

        # Register default parameters
        self._register_default_params('neck_pose_params', 6)
        self._register_default_params('jaw_pose_params', 6)
        self._register_default_params('eye_pose_params', 12)
        self._register_default_params('shape_params', config.num_shape_params)
        self._register_default_params('expression_params', config.num_exp_params)

        # Static and Dynamic Landmark embeddings for FLAME
        # static_lmk_embedding = np.load(config.flame_static_lmk_path, allow_pickle=True)
        static_lmk_embedding = np.load(config.flame_static_lmk_path)
        dynamic_lmk_embedding = np.load(config.flame_dynamic_lmk_path, allow_pickle=True, encoding='latin1').item()
        self.num_lmk_angles = len(dynamic_lmk_embedding['lmk_face_idx'])

        self.register_buffer('lmk_faces_idx', torch.from_numpy(static_lmk_embedding['lmk_face_idx'][17:].astype(int)).to(torch.int64))
        self.register_buffer('lmk_bary_coords', torch.from_numpy(static_lmk_embedding['lmk_b_coords'][17:, :]).to(self.dtype))
        self.register_buffer('dynamic_lmk_faces_idx', torch.from_numpy(np.array(dynamic_lmk_embedding['lmk_face_idx']).astype(int)).to(torch.int64))
        self.register_buffer('dynamic_lmk_bary_coords', torch.from_numpy(np.array(dynamic_lmk_embedding['lmk_b_coords'])).to(self.dtype))

        neck_kin_chain = []
        NECK_IDX = 1
        curr_idx = torch.tensor(NECK_IDX, dtype=torch.long)
        while curr_idx != -1:
            neck_kin_chain.append(curr_idx)
            curr_idx = self.parents[curr_idx]
        self.register_buffer('neck_kin_chain', torch.stack(neck_kin_chain))

    def _find_dynamic_lmk_idx_and_bcoords(self, pose, dynamic_lmk_faces_idx,
                                          dynamic_lmk_b_coords,
                                          neck_kin_chain, dtype=torch.float32):
        """
        Selects the face contour depending on the relative position of the head
        Input:
            vertices: N X num_of_vertices X 3
            pose: N X full pose
            dynamic_lmk_faces_idx: The list of contour face indexes
            dynamic_lmk_b_coords: The list of contour barycentric weights
            neck_kin_chain: The tree to consider for the relative rotation
            dtype: Data type
        return:
            The contour face indexes and the corresponding barycentric weights
        """

        batch_size = pose.shape[0]

        aa_pose = torch.index_select(pose.view(batch_size, -1, 6), 1,
                                     neck_kin_chain)

        rot_mats = rotation_6d_to_matrix(aa_pose.view(-1, 6)).view([batch_size, -1, 3, 3])
        rel_rot_mat = torch.eye(3, device=pose.device, dtype=dtype).unsqueeze_(dim=0).expand(batch_size, -1, -1)

        for idx in range(len(neck_kin_chain)):
            rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)

        y_rot_angle = torch.round(
            torch.clamp(rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,
                        max=(self.num_lmk_angles - 1) // 2)).to(dtype=torch.long)

        neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
        mask = y_rot_angle.lt(-(self.num_lmk_angles - 1) // 2).to(dtype=torch.long)
        neg_vals = mask * (self.num_lmk_angles - 1) + (1 - mask) * ((self.num_lmk_angles - 1) // 2 - y_rot_angle)
        y_rot_angle = (neg_mask * neg_vals + (1 - neg_mask) * y_rot_angle)

        dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,
                                               0, y_rot_angle)
        dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,
                                              0, y_rot_angle)
        return dyn_lmk_faces_idx, dyn_lmk_b_coords

    def _vertices2landmarks(self, vertices, faces, lmk_faces_idx, lmk_bary_coords):
        """
        Calculates landmarks by barycentric interpolation
        Input:
            vertices: torch.tensor NxVx3, dtype = torch.float32
                The tensor of input vertices
            faces: torch.tensor (N*F)x3, dtype = torch.long
                The faces of the mesh
            lmk_faces_idx: torch.tensor N X L, dtype = torch.long
                The tensor with the indices of the faces used to calculate the
                landmarks.
            lmk_bary_coords: torch.tensor N X L X 3, dtype = torch.float32
                The tensor of barycentric coordinates that are used to interpolate
                the landmarks

        Returns:
            landmarks: torch.tensor NxLx3, dtype = torch.float32
                The coordinates of the landmarks for each mesh in the batch
        """
        # Extract the indices of the vertices for each face
        # NxLx3
        batch_size, num_verts = vertices.shape[:2]
        lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view(
            1, -1, 3).view(batch_size, lmk_faces_idx.shape[1], -1)

        lmk_faces += torch.arange(batch_size, dtype=torch.long).view(-1, 1, 1).to(
            device=vertices.device) * num_verts

        lmk_vertices = vertices.view(-1, 3)[lmk_faces]
        landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])
        return landmarks

    def forward(self, shape_params, trans_params=None, rot_params=None, neck_pose_params=None, jaw_pose_params=None, eye_pose_params=None, expression_params=None):

        """
        Input:
            trans_params: N X 3 global translation
            rot_params: N X 3 global rotation around the root joint of the kinematic tree (rotation is NOT around the origin!)
            neck_pose_params (optional): N X 3 rotation of the head vertices around the neck joint
            jaw_pose_params (optional): N X 3 rotation of the jaw
            eye_pose_params (optional): N X 6 rotations of left (parameters [0:3]) and right eyeball (parameters [3:6])
            shape_params (optional): N X number of shape parameters
            expression_params (optional): N X number of expression parameters
        return:
            vertices: N X V X 3
            landmarks: N X number of landmarks X 3
        """
        batch_size = shape_params.shape[0]
        I = matrix_to_rotation_6d(torch.eye(3)[None].cuda())

        if trans_params is None:
            trans_params = torch.zeros(batch_size, 3).cuda()
        if rot_params is None:
            rot_params = I.clone()
        if neck_pose_params is None:
            neck_pose_params = I.clone()
        if jaw_pose_params is None:
            jaw_pose_params = self.jaw_pose_params.expand(batch_size, -1)
        if eye_pose_params is None:
            eye_pose_params = self.eye_pose_params.expand(batch_size, -1)
        if shape_params is None:
            shape_params = self.shape_params.expand(batch_size, -1)
        if expression_params is None:
            expression_params = self.expression_params.expand(batch_size, -1)

        # Concatenate identity shape and expression parameters
        betas = torch.cat([shape_params, expression_params], dim=1)

        # The pose vector contains global rotation, and neck, jaw, and eyeball rotations
        full_pose = torch.cat([rot_params, neck_pose_params, jaw_pose_params, eye_pose_params], dim=1)

        # FLAME models shape and expression deformations as vertex offset from the mean face in 'zero pose', called v_template
        template_vertices = self.v_template.unsqueeze(0).expand(batch_size, -1, -1)

        # Use linear blend skinning to model pose rotations
        vertices, _ = lbs(betas, full_pose, template_vertices,
                          self.shapedirs, self.posedirs,
                          self.J_regressor, self.parents,
                          self.lbs_weights, dtype=self.dtype)

        # Add translation to the vertices
        vertices = vertices + trans_params[:, None, :]

        lmk_faces_idx = self.lmk_faces_idx.unsqueeze(dim=0).expand(batch_size, -1)
        lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).expand(batch_size, -1, -1)

        dyn_lmk_faces_idx, dyn_lmk_bary_coords = self._find_dynamic_lmk_idx_and_bcoords(
            full_pose, self.dynamic_lmk_faces_idx,
            self.dynamic_lmk_bary_coords,
            self.neck_kin_chain, dtype=self.dtype)

        lmk_faces_idx = torch.cat([dyn_lmk_faces_idx, lmk_faces_idx], 1)
        lmk_bary_coords = torch.cat([dyn_lmk_bary_coords, lmk_bary_coords], 1)
        landmarks2d = self._vertices2landmarks(vertices, self.faces, lmk_faces_idx, lmk_bary_coords)

        return vertices, landmarks2d

    def _register_default_params(self, param_fname, dim):
        default_params = torch.zeros([1, dim], dtype=self.dtype, requires_grad=False)
        self.register_parameter(param_fname, nn.Parameter(default_params, requires_grad=False))


class FLAMETex(nn.Module):
    """
    current FLAME texture are adapted from BFM Texture Model
    """

    def __init__(self, config):
        super(FLAMETex, self).__init__()
        tex_space = np.load(config.tex_space_path)
        mu_key = 'MU'
        pc_key = 'PC'
        n_pc = 199
        texture_mean = tex_space[mu_key].reshape(1, -1)
        texture_basis = tex_space[pc_key].reshape(-1, n_pc)
        n_tex = config.tex_params
        texture_mean = torch.from_numpy(texture_mean).float()[None, ...] * 255.0
        texture_basis = torch.from_numpy(texture_basis[:, :n_tex]).float()[None, ...] * 255.0
        self.register_buffer('texture_mean', texture_mean)
        self.register_buffer('texture_basis', texture_basis)
        self.image_size = config.image_size

    def forward(self, texcode):
        texture = self.texture_mean + (self.texture_basis * texcode[:, None, :]).sum(-1)
        texture = texture.reshape(texcode.shape[0], 512, 512, 3).permute(0, 3, 1, 2)
        texture = F.interpolate(texture, self.image_size, mode='bilinear')
        texture = texture[:, [2, 1, 0], :, :]
        return texture

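A rough sketch of driving the decoder above, illustrative only: the asset paths are placeholders for the separately downloaded FLAME model and landmark embeddings, and a CUDA device is assumed because `forward` builds its default poses with `.cuda()`:

```python
import torch
from easydict import EasyDict
from SHOW.flame.FLAME import FLAME

cfg = EasyDict(
    flame_geom_path='assets/generic_model.pkl',                 # FLAME geometry pickle (placeholder)
    flame_static_lmk_path='assets/flame_static_embedding.npz',  # static 68-landmark embedding
    flame_dynamic_lmk_path='assets/flame_dynamic_embedding.npy',
    num_shape_params=100,
    num_exp_params=50,
)
flame = FLAME(cfg).cuda()

shape = torch.zeros(1, cfg.num_shape_params).cuda()     # identity code
exp = 0.1 * torch.randn(1, cfg.num_exp_params).cuda()   # expression code
verts, lmk3d = flame(shape_params=shape, expression_params=exp)
print(verts.shape, lmk3d.shape)  # expected (1, 5023, 3) vertices and 17 dynamic + 51 static landmarks
```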
SHOW/SHOW/flame/__init__.py
ADDED
@@ -0,0 +1,2 @@
from .FLAME import FLAME
from .lbs import lbs

SHOW/SHOW/flame/lbs.py
ADDED
@@ -0,0 +1,406 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import torch
import torch.nn.functional as F


# in batch*5
# out batch*6
from pytorch3d.transforms import rotation_6d_to_matrix


def stereographic_unproject_old(a):
    s2 = torch.pow(a, 2).sum(1)  # batch
    unproj = 2 * a / (s2 + 1).view(-1, 1).repeat(1, 5)  # batch*5
    w = (s2 - 1) / (s2 + 1)  # batch
    out = torch.cat((unproj, w.view(-1, 1)), 1)  # batch*6

    return out


# in a batch*5, axis int
def stereographic_unproject(a, axis=None):
    """
    Inverse of stereographic projection: increases dimension by one.
    """
    batch = a.shape[0]
    if axis is None:
        axis = a.shape[1]
    s2 = torch.pow(a, 2).sum(1)  # batch
    ans = torch.autograd.Variable(torch.zeros(batch, a.shape[1] + 1).cuda())  # batch*6
    unproj = 2 * a / (s2 + 1).view(batch, 1).repeat(1, a.shape[1])  # batch*5
    if (axis > 0):
        ans[:, :axis] = unproj[:, :axis]  # batch*(axis-0)
    ans[:, axis] = (s2 - 1) / (s2 + 1)  # batch
    ans[:, axis + 1:] = unproj[:, axis:]  # batch*(5-axis) # Note that this is a no-op if the default option (last axis) is used
    return ans


def rot_mat_to_euler(rot_mats):
    # Calculates rotation matrix to euler angles
    # Careful for extreme cases of eular angles like [0.0, pi, 0.0]

    sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +
                    rot_mats[:, 1, 0] * rot_mats[:, 1, 0])
    return torch.atan2(-rot_mats[:, 2, 0], sy)


def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,
                                     dynamic_lmk_b_coords,
                                     neck_kin_chain, dtype=torch.float32):
    ''' Compute the faces, barycentric coordinates for the dynamic landmarks

        To do so, we first compute the rotation of the neck around the y-axis
        and then use a pre-computed look-up table to find the faces and the
        barycentric coordinates that will be used.

        Special thanks to Soubhik Sanyal ([email protected])
        for providing the original TensorFlow implementation and for the LUT.

        Parameters
        ----------
        vertices: torch.tensor BxVx3, dtype = torch.float32
            The tensor of input vertices
        pose: torch.tensor Bx(Jx3), dtype = torch.float32
            The current pose of the body model
        dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long
            The look-up table from neck rotation to faces
        dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32
            The look-up table from neck rotation to barycentric coordinates
        neck_kin_chain: list
            A python list that contains the indices of the joints that form the
            kinematic chain of the neck.
        dtype: torch.dtype, optional

        Returns
        -------
        dyn_lmk_faces_idx: torch.tensor, dtype = torch.long
            A tensor of size BxL that contains the indices of the faces that
            will be used to compute the current dynamic landmarks.
        dyn_lmk_b_coords: torch.tensor, dtype = torch.float32
            A tensor of size BxL that contains the indices of the faces that
            will be used to compute the current dynamic landmarks.
    '''

    batch_size = vertices.shape[0]

    aa_pose = torch.index_select(pose.view(batch_size, -1, 6), 1, neck_kin_chain)
    rot_mats = rotation_6d_to_matrix(aa_pose.view(-1, 6)).view(batch_size, -1, 3, 3)

    rel_rot_mat = torch.eye(3, device=vertices.device,
                            dtype=dtype).unsqueeze_(dim=0)
    for idx in range(len(neck_kin_chain)):
        rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)

    y_rot_angle = torch.round(
        torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,
                    max=39)).to(dtype=torch.long)
    neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
    mask = y_rot_angle.lt(-39).to(dtype=torch.long)
    neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
    y_rot_angle = (neg_mask * neg_vals +
                   (1 - neg_mask) * y_rot_angle)

    dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,
                                           0, y_rot_angle)
    dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,
                                          0, y_rot_angle)

    return dyn_lmk_faces_idx, dyn_lmk_b_coords


def vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords):
    ''' Calculates landmarks by barycentric interpolation

        Parameters
        ----------
        vertices: torch.tensor BxVx3, dtype = torch.float32
            The tensor of input vertices
        faces: torch.tensor Fx3, dtype = torch.long
            The faces of the mesh
        lmk_faces_idx: torch.tensor L, dtype = torch.long
            The tensor with the indices of the faces used to calculate the
            landmarks.
        lmk_bary_coords: torch.tensor Lx3, dtype = torch.float32
            The tensor of barycentric coordinates that are used to interpolate
            the landmarks

        Returns
        -------
        landmarks: torch.tensor BxLx3, dtype = torch.float32
            The coordinates of the landmarks for each mesh in the batch
    '''
    # Extract the indices of the vertices for each face
    # BxLx3
    batch_size, num_verts = vertices.shape[:2]
    device = vertices.device

    lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view(
        batch_size, -1, 3)

    lmk_faces += torch.arange(
        batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts

    lmk_vertices = vertices.view(-1, 3)[lmk_faces].view(
        batch_size, -1, 3, 3)

    landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])
    return landmarks


def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,
        lbs_weights, pose2rot=True, dtype=torch.float32):
    ''' Performs Linear Blend Skinning with the given shape and pose parameters

        Parameters
        ----------
        betas : torch.tensor BxNB
            The tensor of shape parameters
        pose : torch.tensor Bx(J + 1) * 3
            The pose parameters in axis-angle format
        v_template torch.tensor BxVx3
            The template mesh that will be deformed
        shapedirs : torch.tensor 1xNB
            The tensor of PCA shape displacements
        posedirs : torch.tensor Px(V * 3)
            The pose PCA coefficients
        J_regressor : torch.tensor JxV
            The regressor array that is used to calculate the joints from
            the position of the vertices
        parents: torch.tensor J
            The array that describes the kinematic tree for the model
        lbs_weights: torch.tensor N x V x (J + 1)
            The linear blend skinning weights that represent how much the
            rotation matrix of each part affects each vertex
        pose2rot: bool, optional
            Flag on whether to convert the input pose tensor to rotation
            matrices. The default value is True. If False, then the pose tensor
            should already contain rotation matrices and have a size of
            Bx(J + 1)x9
        dtype: torch.dtype, optional

        Returns
        -------
        verts: torch.tensor BxVx3
            The vertices of the mesh after applying the shape and pose
            displacements.
        joints: torch.tensor BxJx3
            The joints of the model
    '''

    batch_size = max(betas.shape[0], pose.shape[0])
    device = betas.device

    # Add shape contribution
    v_shaped = v_template + blend_shapes(betas, shapedirs)

    # Get the joints
    # NxJx3 array
    J = vertices2joints(J_regressor, v_shaped)

    # 3. Add pose blend shapes
    # N x J x 3 x 3
    ident = torch.eye(3, dtype=dtype, device=device)
    if pose2rot:
        # rot_mats = batch_rodrigues(pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3])
        rot_mats = rotation_6d_to_matrix(pose.view(-1, 6)).view([batch_size, -1, 3, 3])

        pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
        # (N x P) x (P, V * 3) -> N x V x 3
        pose_offsets = torch.matmul(pose_feature, posedirs) \
            .view(batch_size, -1, 3)
    else:
        pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident
        rot_mats = pose.view(batch_size, -1, 3, 3)

        pose_offsets = torch.matmul(pose_feature.view(batch_size, -1),
                                    posedirs).view(batch_size, -1, 3)

    v_posed = pose_offsets + v_shaped
    # 4. Get the global joint location
    J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)

    # 5. Do skinning:
    # W is N x V x (J + 1)
    W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
    # (N x V x (J + 1)) x (N x (J + 1) x 16)
    num_joints = J_regressor.shape[0]
    T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \
        .view(batch_size, -1, 4, 4)

    homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],
                               dtype=dtype, device=device)
    v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)
    v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))

    verts = v_homo[:, :, :3, 0]

    return verts, J_transformed


def vertices2joints(J_regressor, vertices):
    ''' Calculates the 3D joint locations from the vertices

        Parameters
        ----------
        J_regressor : torch.tensor JxV
            The regressor array that is used to calculate the joints from the
            position of the vertices
        vertices : torch.tensor BxVx3
            The tensor of mesh vertices

        Returns
        -------
        torch.tensor BxJx3
            The location of the joints
    '''

    return torch.einsum('bik,ji->bjk', [vertices, J_regressor])


def blend_shapes(betas, shape_disps):
    ''' Calculates the per vertex displacement due to the blend shapes

        Parameters
        ----------
        betas : torch.tensor Bx(num_betas)
            Blend shape coefficients
        shape_disps: torch.tensor Vx3x(num_betas)
            Blend shapes

        Returns
        -------
        torch.tensor BxVx3
            The per-vertex displacement due to shape deformation
    '''

    # Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l]
    # i.e. Multiply each shape displacement by its corresponding beta and
    # then sum them.
    blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps])
    return blend_shape


def _batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' Calculates the rotation matrices for a batch of rotation vectors
        Parameters
        ----------
        rot_vecs: torch.tensor Nx3
            array of N axis-angle vectors
        Returns
        -------
        R: torch.tensor Nx3x3
            The rotation matrices for the given axis-angle parameters
    '''

    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device

    angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle

    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)

    # Bx1 arrays
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)
    K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)

    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))

    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    return rot_mat


def transform_mat(R, t):
    ''' Creates a batch of transformation matrices
        Args:
            - R: Bx3x3 array of a batch of rotation matrices
            - t: Bx3x1 array of a batch of translation vectors
        Returns:
            - T: Bx4x4 Transformation matrix
    '''
    # No padding left or right, only add an extra row
    return torch.cat([F.pad(R, [0, 0, 0, 1]),
                      F.pad(t, [0, 0, 0, 1], value=1)], dim=2)


def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):
    """
    Applies a batch of rigid transformations to the joints

    Parameters
    ----------
    rot_mats : torch.tensor BxNx3x3
        Tensor of rotation matrices
    joints : torch.tensor BxNx3
        Locations of joints
    parents : torch.tensor BxN
        The kinematic tree of each object
    dtype : torch.dtype, optional:
        The data type of the created tensors, the default is torch.float32

    Returns
    -------
    posed_joints : torch.tensor BxNx3
        The locations of the joints after applying the pose rotations
    rel_transforms : torch.tensor BxNx4x4
        The relative (with respect to the root joint) rigid transformations
        for all the joints
    """

    joints = torch.unsqueeze(joints, dim=-1)

    rel_joints = joints.clone()
    rel_joints[:, 1:] -= joints[:, parents[1:]]

    transforms_mat = transform_mat(
        rot_mats.view(-1, 3, 3),
        rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)

    transform_chain = [transforms_mat[:, 0]]
    for i in range(1, parents.shape[0]):
        # Subtract the joint location at the rest pose
        # No need for rotation, since it's identity when at rest
        curr_res = torch.matmul(transform_chain[parents[i]],
                                transforms_mat[:, i])
        transform_chain.append(curr_res)

    transforms = torch.stack(transform_chain, dim=1)

    # The last column of the transformations contains the posed joints
    posed_joints = transforms[:, :, :3, 3]

    joints_homogen = F.pad(joints, [0, 0, 0, 1])

    rel_transforms = transforms - F.pad(
        torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])

    return posed_joints, rel_transforms

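Most of `lbs.py` operates on model-specific tensors, but the small helpers can be sanity-checked in isolation; for instance `_batch_rodrigues` (kept in the file even though `lbs` itself uses the 6D-rotation path) should rotate the x-axis onto the y-axis for a 90° turn about z. An illustrative check, not part of the repository:

```python
import math
import torch
from SHOW.flame.lbs import _batch_rodrigues

aa = torch.tensor([[0.0, 0.0, math.pi / 2]])   # one axis-angle vector, shape Nx3
R = _batch_rodrigues(aa)                       # Nx3x3 rotation matrices
print(R[0] @ torch.tensor([1.0, 0.0, 0.0]))    # ≈ tensor([0., 1., 0.])
```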
SHOW/SHOW/image.py
ADDED
@@ -0,0 +1,119 @@
# -*- coding: utf-8 -*-

# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2023 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Authors: paper author.
# Special Acknowlegement: Wojciech Zielonka and Justus Thies
# Contact: [email protected]

import numpy as np
import torch
import torch.nn.functional as F


def lmk2d_to_bbox(lmks, h, w, bb_scale=2.0):
    # lmks: 68,2
    x_min, x_max, y_min, y_max = np.min(lmks[:, 0]), np.max(lmks[:, 0]), np.min(lmks[:, 1]), np.max(lmks[:, 1])
    x_center, y_center = int((x_max + x_min) / 2.0), int((y_max + y_min) / 2.0)
    size = int(bb_scale * 2 * max(x_center - x_min, y_center - y_min))
    xb_min, xb_max, yb_min, yb_max = max(x_center - size // 2, 0), min(x_center + size // 2, w - 1), \
                                     max(y_center - size // 2, 0), min(y_center + size // 2, h - 1)

    yb_max = min(yb_max, h - 1)
    xb_max = min(xb_max, w - 1)
    yb_min = max(yb_min, 0)
    xb_min = max(xb_min, 0)
    return [xb_min, yb_min, xb_max, yb_max]


def landmark_crop(image, lmks, dense_lmk, bb_scale=2.0):
    h, w = image.shape[1:]

    xb_min, yb_min, xb_max, yb_max = lmk2d_to_bbox(lmks, h, w, bb_scale=bb_scale)

    if (xb_max - xb_min) % 2 != 0:
        xb_min += 1

    if (yb_max - yb_min) % 2 != 0:
        yb_min += 1

    cropped_image = crop_image(image, xb_min, yb_min, xb_max, yb_max)
    cropped_image_lmks = np.vstack((lmks[:, 0] - xb_min, lmks[:, 1] - yb_min)).T
    cropped_dense_lmk = np.vstack((dense_lmk[:, 0] - xb_min, dense_lmk[:, 1] - yb_min)).T
    return cropped_image_lmks, cropped_dense_lmk, cropped_image, {'xb_min': xb_min, 'xb_max': xb_max, 'yb_min': yb_min, 'yb_max': yb_max}


def crop_image(image, x_min, y_min, x_max, y_max):
    # image: C,H,W or c,y,x
    return image[:, max(y_min, 0):min(y_max, image.shape[1] - 1), max(x_min, 0):min(x_max, image.shape[2] - 1)]


def squarefiy(image, lmk, dense_lmk, size=512):
    _, h, w = image.shape
    px = py = 0
    max_wh = max(w, h)

    if w != h:
        px = int((max_wh - w) / 2)
        py = int((max_wh - h) / 2)
        image = F.pad(image, (px, px, py, py), 'constant', 0)

    img = F.interpolate(image[None], (size, size), mode='bilinear', align_corners=False)[0]

    if False:
        scale_x = size / (w + px)
        scale_y = size / (h + py)
        lmk[:, 0] *= scale_x
        lmk[:, 1] *= scale_y
        dense_lmk[:, 0] *= scale_x
        dense_lmk[:, 1] *= scale_y
    else:
        lmk[:, 0] = (lmk[:, 0] + px) * size / max_wh
        lmk[:, 1] = (lmk[:, 1] + py) * size / max_wh
        dense_lmk[:, 0] = (dense_lmk[:, 0] + px) * size / max_wh
        dense_lmk[:, 1] = (dense_lmk[:, 1] + py) * size / max_wh

    return img, lmk, dense_lmk, px, py


def tensor2im(input_image, imtype=np.uint8):
    if isinstance(input_image, torch.Tensor):
        input_image = torch.clamp(input_image, -1.0, 1.0)
        image_tensor = input_image.data
    else:
        return input_image.reshape(3, 512, 512).transpose()
    image_numpy = image_tensor[0].cpu().float().numpy()
    if image_numpy.shape[0] == 1:
        image_numpy = np.tile(image_numpy, (3, 1, 1))
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    return image_numpy.astype(imtype)


def get_heatmap(values):
    import cv2
    l2 = tensor2im(values)
    l2 = cv2.cvtColor(l2, cv2.COLOR_RGB2BGR)
    l2 = cv2.normalize(l2, None, 0, 255, cv2.NORM_MINMAX)
    heatmap = cv2.applyColorMap(l2, cv2.COLORMAP_JET)
    heatmap = cv2.cvtColor(cv2.addWeighted(heatmap, 0.75, l2, 0.25, 0).astype(np.uint8), cv2.COLOR_BGR2RGB) / 255.
    heatmap = torch.from_numpy(heatmap).permute(2, 0, 1)
    return heatmap


def crop_image_bbox(image, lmks, dense_lmk, bbox):
    xb_min = bbox['xb_min']
    yb_min = bbox['yb_min']
    xb_max = bbox['xb_max']
    yb_max = bbox['yb_max']
    cropped = crop_image(image, xb_min, yb_min, xb_max, yb_max)
    cropped_image_lmks = np.vstack((lmks[:, 0] - xb_min, lmks[:, 1] - yb_min)).T
    cropped_image_dense_lmk = np.vstack((dense_lmk[:, 0] - xb_min, dense_lmk[:, 1] - yb_min)).T
    return cropped_image_lmks, cropped_image_dense_lmk, cropped

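The cropping utilities all hinge on `lmk2d_to_bbox`, which turns 68 detected 2D landmarks into a clamped, roughly square face box scaled by `bb_scale`. A small illustrative call (random landmarks stand in for real detector output):

```python
import numpy as np
from SHOW.image import lmk2d_to_bbox

h, w = 480, 640
lmks = np.random.rand(68, 2) * [w, h]             # fake 68x2 landmarks inside a 640x480 frame
xb_min, yb_min, xb_max, yb_max = lmk2d_to_bbox(lmks, h, w, bb_scale=2.0)
print(xb_min, yb_min, xb_max, yb_max)             # box corners, clamped to the image bounds
```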
SHOW/SHOW/load_assets.py
ADDED
@@ -0,0 +1,243 @@
import sys
import os
import pickle
import torch
import numpy as np
from plyfile import PlyData
from easydict import EasyDict
from pathlib import Path
import random
import string
import mmcv
from loguru import logger
from modules.MICA.api_MICA import api_MICA
from SHOW.video_filter.MMposer import MMPoseAnalyzer
import cv2
import SHOW
import tempfile


def get_possible_person(poser, template_im):

    def is_small_person(bbox, org_im: np.ndarray):
        img_height, img_width, _ = org_im.shape
        box_height = bbox[3] - bbox[1]
        box_width = bbox[2] - bbox[0]
        is_small_person = 0
        if ((box_height / img_height) < 0.4 or (box_width / img_width) < 0.3):
            is_small_person = 1
        return is_small_person

    def is_kpts_whole(kpts):
        is_whole = 1
        poit_to_check = list(range(0, 13))
        for idx in poit_to_check:
            if kpts[idx][-1] < 0.3:
                is_whole = 0
                break
        return is_whole

    if isinstance(template_im, str):
        org_im = cv2.imread(template_im)
    else:
        org_im = template_im

    pose_results = poser.predict(template_im)

    #################
    logger.info(f'mmpose det length before: {len(pose_results)}')
    # pose_results = [i for i in pose_results if i['bbox'][-1]>0.3]
    # pose_results = [i for i in pose_results if is_small_person(i['bbox'],org_im)]
    pose_results = [i for i in pose_results if is_kpts_whole(i['keypoints'])]
    if len(pose_results) == 0:
        logger.info(f'no whole person detected')
        return None
    logger.info(f'mmpose det length after: {len(pose_results)}')
    #################

    for idx in range(len(pose_results)):
        bbox = pose_results[idx]['bbox']
        box_height = bbox[3] - bbox[1]
        box_width = bbox[2] - bbox[0]
        pose_results[idx]['size'] = box_height * box_width

    pose_results.sort(key=lambda x: x['size'], reverse=True)
    pose_results_size_list = [i['size'] for i in pose_results]
    logger.info(f'pose_results_size_list: {pose_results_size_list}')

    max_ret = pose_results[0]
    left, top, right, bottom = [int(i) for i in max_ret['bbox'][:4]]
    max_person_crop_im = org_im[top:bottom, left:right, :]
    logger.info(
        f'cropped image from left:{left},top:{top},right:{right},bottom:{bottom}'
    )
    #################

    return max_person_crop_im


def read_shape(speaker_ply_file_path):
    # return: (5023, 3)
    ply_data = PlyData.read(speaker_ply_file_path)
    speaker_shape = np.stack([
        ply_data['vertex']['x'], ply_data['vertex']['y'],
        ply_data['vertex']['z']
    ], 1)
    return speaker_shape


def load_assets(config, face_ider=None, template_im=None, **kwargs):

    assets_root = config.assets_root
    dtype = config.dtype
    device = config.device
    ret_dict = EasyDict({})

    shape_res_factory_dir = f'{assets_root}/id_pic/shape_factory2'
    emb_res_factory_dir = f'{assets_root}/id_pic/{config.ider_cfg.npy_folder_name}'
    emb_res_factory_path = f'{assets_root}/id_pic/emb_factory2.pkl'

    emb_res_factory_is_changed_flag = False
    if Path(emb_res_factory_path).exists():
        emb_res_factory = mmcv.load(emb_res_factory_path)
    else:
        emb_res_factory = {'0' * 12: [np.zeros((512, ))]}
        emb_res_factory_is_changed_flag = True

    use_direct_face_emb = False
    max_person_crop_im = None
    shape_id = config.speaker_name
    logger.info(f'shape_id/speaker_name: {shape_id}')
    registered_id = ['oliver', 'seth', 'conan', 'chemistry']

    if shape_id not in registered_id:
        logger.info(f'current shape_id: {shape_id}')

        poser = MMPoseAnalyzer()
        config.load_betas = False
        config.save_betas = False

        max_person_crop_im = get_possible_person(poser, template_im)
        if max_person_crop_im is None:
            logger.error(f'max_person_crop_im is None')
            return

        face_ider_ret = face_ider.get(max_person_crop_im)
        if face_ider_ret is None:
            logger.error(f'face_ider_ret is None')
            ret_dict.person_face_emb = None
        else:
            cur_speaker_feat = face_ider_ret[0].normed_embedding
            use_direct_face_emb = True

            shape_id = os.urandom(24).hex()
            mica_out_ply = os.path.join(tempfile.gettempdir(), f'{shape_id}.ply')

            mica_out_img = None
            mica_out_npy = None

            ret_dict.person_face_emb = cur_speaker_feat

        del poser

    else:
        person_face_emb_path = os.path.abspath(
            os.path.join(assets_root, emb_res_factory_dir, f'{shape_id}.npy'))

        if Path(person_face_emb_path).exists():
            ret_dict.person_face_emb = np.load(person_face_emb_path)
        else:
            ret_dict.person_face_emb = None

        logger.info(f'loaded specific speaker: {shape_id}')
        mica_out_ply = os.path.join(shape_res_factory_dir, shape_id,
                                    'out.ply')
        mica_out_img = os.path.join(shape_res_factory_dir, shape_id,
                                    'out.jpg')
        mica_out_npy = os.path.join(shape_res_factory_dir, shape_id,
                                    'out.npy')

    ####################################### save pkl
    if emb_res_factory_is_changed_flag:
        logger.info(f'saving emb_res_factory...')
        mmcv.dump(emb_res_factory, emb_res_factory_path)

    ####################################### run MICA
    if ret_dict.person_face_emb is not None:
        if not Path(mica_out_ply).exists():
            mica_ins = api_MICA()
            mica_ins.predict(
                input_img_path=max_person_crop_im
                if max_person_crop_im is not None else template_im,
                output_ply_path=mica_out_ply,
                output_render_path=mica_out_img,
                output_param_npy_path=mica_out_npy
            )
            del mica_ins

        if Path(mica_out_ply).exists():
            ret_dict.speaker_shape_vertices = torch.from_numpy(
                read_shape(mica_out_ply)).to(device).to(dtype)
        else:
            ret_dict.speaker_shape_vertices = None

        if use_direct_face_emb:
            # mica_out_ply_f.close()
            if os.path.exists(mica_out_ply):
                os.remove(mica_out_ply)
    else:
        ret_dict.speaker_shape_vertices = None

    ####################################### others
    config.speaker_name = shape_id
    logger.info(f'shape_id/speaker_name: {shape_id}')

    flame2020to2019_exp_trafo = './flame2020to2019_exp_trafo.npy'
    flame2020to2019_exp_trafo = os.path.abspath(
        os.path.join(assets_root, flame2020to2019_exp_trafo))
    flame2020to2019_exp_trafo = np.load(flame2020to2019_exp_trafo)
    flame2020to2019_exp_trafo = torch.from_numpy(flame2020to2019_exp_trafo).to(
        device).to(dtype)
    ret_dict.flame2020to2019_exp_trafo = flame2020to2019_exp_trafo

    mediapipe_landmark_embedding = './mediapipe_landmark_embedding__smplx.npz'
    mediapipe_landmark_embedding = os.path.abspath(
        os.path.join(assets_root, mediapipe_landmark_embedding))
    mp_lmk_emb = {}
    mediapipe_landmark_embedding = np.load(mediapipe_landmark_embedding)
    mp_lmk_emb['lmk_face_idx'] = torch.from_numpy(
        mediapipe_landmark_embedding['lmk_face_idx'].astype(int)).to(
            device).to(dtype).long()
    mp_lmk_emb['lmk_b_coords'] = torch.from_numpy(
        mediapipe_landmark_embedding['lmk_b_coords'].astype(float)).to(
            device).to(dtype)
    mp_lmk_emb['landmark_indices'] = torch.from_numpy(
        mediapipe_landmark_embedding['landmark_indices'].astype(int)).to(
            device).to(dtype)
    ret_dict.mp_lmk_emb = mp_lmk_emb

    FLAME_masks = './FLAME_masks.pkl'
    FLAME_masks = os.path.abspath(os.path.join(assets_root, FLAME_masks))
    with open(FLAME_masks, 'rb') as f:
        FLAME_masks = pickle.load(f, encoding='latin1')
    for key in FLAME_masks.keys():
        FLAME_masks[key] = torch.from_numpy(
            FLAME_masks[key]).to(device).to(dtype)
    ret_dict.FLAME_masks = FLAME_masks

    smplx2flame_idx = './SMPL-X__FLAME_vertex_ids.npy'
    smplx2flame_idx = os.path.abspath(
        os.path.join(assets_root, smplx2flame_idx))
    smplx2flame_idx = np.load(smplx2flame_idx)
    smplx2flame_idx = torch.from_numpy(smplx2flame_idx).to(device).to(
        dtype).long()
    ret_dict.smplx2flame_idx = smplx2flame_idx

    vertex_colors = r'./smplx_verts_colors.txt'
    vertex_colors = os.path.abspath(os.path.join(assets_root, vertex_colors))
    vertex_colors = np.loadtxt(vertex_colors)
    ret_dict.vertex_colors = vertex_colors

    return ret_dict

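A heavily hedged sketch of calling `load_assets`: the field names mirror exactly what the function reads, but `assets_root`, the embedding folder name, and the asset layout underneath are the user's own and are only placeholders here.

```python
import torch
from easydict import EasyDict
from SHOW.load_assets import load_assets
from SHOW.face_iders.builder import build_ider
from SHOW.face_iders.base import insightface_ider  # registers the ider backend

cfg = EasyDict(
    assets_root='../SHOW_assets',                          # placeholder asset directory
    dtype=torch.float32,
    device='cuda',
    speaker_name='oliver',                                 # one of the pre-registered ids
    ider_cfg=EasyDict(npy_folder_name='insightface_emb'),  # hypothetical embedding folder name
)
assets = load_assets(cfg, face_ider=build_ider(dict(type='insightface_ider')))
```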
SHOW/SHOW/load_models.py
ADDED
@@ -0,0 +1,72 @@
import smplx
from human_body_prior.tools.model_loader import load_vposer
import torch
import torch.nn as nn
import os
import pickle
import os.path as osp
from .datasets import op_dataset
import numpy as np
import mmcv

DEFAULT_SMPLX_CONFIG2 = dict(
    dtype=torch.float32,
    num_betas=200,
    num_expression_coeffs=50,
    num_pca_comps=12,
    flat_hand_mean=True,
    use_pca=True,
    model_type='smplx',
    use_face_contour=True,
)

DEFAULT_SMPLX_CONFIG = dict(
    create_global_orient=True,
    create_body_pose=True,
    create_betas=True,
    create_left_hand_pose=True,
    create_right_hand_pose=True,
    create_expression=True,
    create_jaw_pose=True,
    create_leye_pose=True,
    create_reye_pose=True,
    create_transl=True,
)


class JointMapper(nn.Module):

    def __init__(self, joint_maps=None):
        super().__init__()
        self.register_buffer('joint_maps',
                             torch.tensor(joint_maps, dtype=torch.long))

    def forward(self, joints, **kwargs):
        return torch.index_select(joints, 1, self.joint_maps)


def load_save_pkl(ours_pkl_file_path, device='cuda'):
    data = mmcv.load(ours_pkl_file_path)[0]

    for key in data.keys():
        if isinstance(data[key], np.ndarray):
            data[key] = torch.from_numpy(data[key]).to(device)
    data['batch_size'] = data['expression'].shape[0]

    return data


def load_smplx_model(device='cuda', **kwargs):
    body_model = smplx.create(joint_mapper=JointMapper(
        op_dataset.smpl_to_openpose()),
        **DEFAULT_SMPLX_CONFIG,
        **kwargs).to(device=device)
    return body_model


def load_vposer_model(device='cuda', vposer_ckpt=''):
    vposer_ckpt = osp.expandvars(vposer_ckpt)
    vposer, _ = load_vposer(vposer_ckpt, vp_model='snapshot')
    vposer = vposer.to(device=device)
    vposer.eval()
    return vposer

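A hedged sketch of how these loaders might be called for the fitting stage; every path below is a placeholder, the SMPL-X model files and VPoser checkpoint must be downloaded separately, and `model_type='smplx'` is passed explicitly because `DEFAULT_SMPLX_CONFIG` does not set it:

```python
from SHOW.load_models import load_smplx_model, load_vposer_model

body_model = load_smplx_model(
    device='cuda',
    model_path='../models/smplx',   # folder containing the SMPL-X .npz/.pkl files (placeholder)
    model_type='smplx',
    gender='neutral',
    use_pca=True,
    num_pca_comps=12,
    batch_size=1,
)
vposer = load_vposer_model(device='cuda', vposer_ckpt='../models/vposer_v1_0')
```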
SHOW/SHOW/loggers/MyNeptuneLogger.py
ADDED
@@ -0,0 +1,66 @@
import torch
from pathlib import Path
import numpy as np
import os.path as osp
import shutil
import os

from mmcv.runner.hooks.logger import NeptuneLoggerHook
from loguru import logger
from .builder import MMYLOGGER
from .base import *


@MMYLOGGER.register_module()
class MyNeptuneLogger(NeptuneLoggerHook):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        from neptune.new.types import File
        self.File = File

    def initialize(self):
        if self.init_kwargs:
            self.run = self.neptune.init(**self.init_kwargs)
        else:
            self.run = self.neptune.init()

    @logger.catch
    def log(self, tag_name: str, tag_value, append=True):
        if append:
            self.run[tag_name].log(tag_value)
        else:
            self.run[tag_name] = tag_value

    @logger.catch
    def log_bs(self, dict_to_log={}, append=True):
        for key, val in dict_to_log.items():
            # if self.is_scalar(val):
            self.log(key, val, append=append)

    @logger.catch
    def log_image(self, key: str, img):
        img = img_preprocess(img)
        self.run[key].upload(self.File.as_image(img))

    @logger.catch
    def update_config(self, config_dict):
        self.run['parameters'] = config_dict

    @logger.catch
    def create_proj(self,
                    workshops='lithiumice',
                    proj_name='smplifyx',
                    ):
        from neptune import management
        api_token = self.init_kwargs.api_token

        management.get_project_list(api_token=api_token)
        management.create_project(
            name=f'{workshops}/{proj_name}',
            key=proj_name,
            api_token=api_token,
            visibility='pub')

SHOW/SHOW/loggers/MyTFLogger.py
ADDED
@@ -0,0 +1,31 @@
import torch
from pathlib import Path
import numpy as np
import os.path as osp
import shutil
import os

from mmcv.runner.hooks.logger import TensorboardLoggerHook
from loguru import logger
from .builder import MMYLOGGER
from .base import *


@MMYLOGGER.register_module()
class MyTFLogger(TensorboardLoggerHook, my_logger):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if Path(self.log_dir).exists():
            shutil.rmtree(self.log_dir)

    @logger.catch
    def log(self, tags: dict, iters=0):
        for tag, val in tags.items():
            if isinstance(val, str):
                self.writer.add_text(tag, val, iters)
            else:
                self.writer.add_scalar(tag, val, iters)

SHOW/SHOW/loggers/MyTextLogger.py
ADDED
@@ -0,0 +1,29 @@
import torch
from pathlib import Path
import numpy as np
import os.path as osp
import shutil
import os

from mmcv.runner.hooks.logger import NeptuneLoggerHook
from loguru import logger
from .builder import MMYLOGGER
from .base import *


@MMYLOGGER.register_module()
class MyTextLogger(object):
    def __init__(self, save_dir, filename="log.txt", mode="a", *args, **kwargs):
        from SHOW.loggers.logger import setup_logger
        setup_logger(save_dir, filename, mode=mode)

    @logger.catch
    def log(self, tag_name: str, tag_value, print_to_screen=False, **kwargs):
        logger.log(f"{tag_name}:{tag_value}")

    @logger.catch
    def log_bs(self, append=True, print_to_screen=False, **kwargs):
        for key, val in kwargs.items():
            # if self.is_scalar(val):
            self.log(key, val, print_to_screen=print_to_screen)

SHOW/SHOW/loggers/MyWandbLogger.py
ADDED
@@ -0,0 +1,59 @@
import torch
from pathlib import Path
import numpy as np
import os.path as osp
import shutil
import os

from mmcv.runner.hooks.logger import WandbLoggerHook
from loguru import logger
from .builder import MMYLOGGER
from .base import *


@MMYLOGGER.register_module()
class MyWandbLogger(WandbLoggerHook):
    def __init__(self, wandb_key, wandb_name, *args, **kwargs):
        os.environ['WANDB_API_KEY'] = wandb_key
        os.environ['WANDB_NAME'] = wandb_name
        super().__init__(*args, **kwargs)

    def initialize(self):
        if self.wandb is None:
            self.import_wandb()
        if self.init_kwargs:
            self.wandb.init(**self.init_kwargs)
        else:
            self.wandb.init()

    @logger.catch
    def log(self, tag_name, tag_value, **kwargs):
        self.wandb.log({tag_name: tag_value})

    @logger.catch
    def log_bs(self, dict_to_log={}, **kwargs):
        self.wandb.log(dict_to_log)

    @logger.catch
    def update_config(self, config_dict):
        self.wandb.config.update(config_dict)

    @logger.catch
    def alert(self, title='', msg=''):
        self.wandb.alert(
            title=title,
            text=msg
        )

    @logger.catch
    def log_image(self, key: str, img):
        img = img_preprocess(img) * 255
        upload_list = [self.wandb.Image(img, caption=key)]
        self.wandb.log({key: upload_list})

SHOW/SHOW/loggers/__init__.py
ADDED
@@ -0,0 +1,6 @@
from .builder import build_my_logger
from .MyNeptuneLogger import MyNeptuneLogger
from .MyWandbLogger import MyWandbLogger

# __all__ = ['build_my_logger',
#            'MyNeptuneLogger', 'MyWandbLogger', 'MyTFLogger']