Spaces: Running on Zero
IsshikiHugh committed
Commit 6b3779f
Parent(s): 8d3ae99
update: add examples and allow quota adjust
Browse files
- .gitattributes      +1 -0
- README.md           +1 -1
- app/entry.py        +4 -5
- app/gui.py          +15 -3
- app/handler.py      +41 -32
- examples/cxk.mp4    +3 -0
- examples/tennis.mp4 +3 -0
.gitattributes CHANGED
@@ -34,3 +34,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 *.tar.gz filter=lfs diff=lfs merge=lfs -text
+*.mp4 filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -10,4 +10,4 @@ app_file: app.py
 pinned: false
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app/entry.py CHANGED
@@ -7,11 +7,10 @@ from app.handler import handler
 
 if __name__ == '__main__':
     demo = gr.Interface(
-        fn …
-        inputs …
-        outputs …
-
-        allow_flagging = 'never',
+        fn = handler,
+        inputs = get_inputs_components(),
+        outputs = get_outputs_components(),
+        examples = get_examples(),
     )
 
     demo.launch()
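For readers unfamiliar with the examples argument: gr.Interface expects a list of rows, each supplying one value per input component in the same order as inputs. A minimal, self-contained sketch of that wiring (the echo function and component labels are illustrative stand-ins, not the Space's real code):

# Minimal sketch, assuming Gradio's Interface API; echo is a placeholder for the real handler.
import gradio as gr

def echo(video, cam_status, gpu_quota):
    # Placeholder: the real Space wires handler() from app.handler here.
    return video

demo = gr.Interface(
    fn = echo,
    inputs = [
        gr.Video(label='Video'),
        gr.Radio(['Static Camera', 'Dynamic Camera'], label='Camera Status'),
        gr.Number(value=60, label='GPU quota'),
    ],
    outputs = gr.Video(label='Result'),
    # Each example row supplies one value per input component, in the same order.
    examples = [['examples/cxk.mp4', 'Static Camera', 60]],
)

if __name__ == '__main__':
    demo.launch()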
app/gui.py CHANGED
@@ -1,4 +1,6 @@
+import os
 import gradio as gr
+from glob import glob
 
 
 def get_inputs_components():
@@ -10,7 +12,13 @@ def get_inputs_components():
         gr.Radio(
             choices = ['Static Camera', 'Dynamic Camera'],
             label = 'Camera Status',
-            info = 'If the camera is static, DPVO will be skipped.'
+            info = 'If the camera is static, DPVO will be skipped.'
+        ),
+        gr.Number(
+            value = 60,
+            label = 'GPU quota',
+            info = 'Decrease this value if you have insufficient GPU quota left. (Allocating too little may waste GPU quota, while allocating too much won\'t. We suggest setting it as high as possible.)',
+        ),
     ]
 
 
@@ -28,5 +36,9 @@ def get_outputs_components():
 
 
 def get_examples():
-    …
-    …
+    REPO_ROOT = str(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+    return [
+        [os.path.join(REPO_ROOT, 'examples/cxk.mp4'), 'Static Camera', 60],
+        [os.path.join(REPO_ROOT, 'examples/tennis.mp4'), 'Static Camera', 60],
+    ]
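As a side note, this hunk also imports glob, though the visible lines still hard-code the two example paths. A hedged sketch of how the same rows could be collected by globbing examples/*.mp4 (a hypothetical variant, not what the commit ships):

# Hypothetical variant: build the example rows by globbing examples/*.mp4 instead of
# listing each file; the default camera status and quota mirror the committed values.
import os
from glob import glob

def get_examples():
    repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    videos = sorted(glob(os.path.join(repo_root, 'examples', '*.mp4')))
    return [[v, 'Static Camera', 60] for v in videos]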
app/handler.py CHANGED
@@ -41,39 +41,44 @@ def prepare_cfg(is_static:bool, video_path:str, demo_id:str):
     return cfg
 
 
-    … (previous run_demo implementation, old lines 44-73, not captured in this view)
+def run_demo(cfg, progress, GPU_quota):
+    ''' Allow user to adjust GPU quota. '''
+
+    @spaces.GPU(duration=GPU_quota)
+    def run_GPU_task():
+        paths = cfg.paths
+        Log.info(f"[GPU]: {torch.cuda.get_device_name()}")
+        Log.info(f'[GPU]: {torch.cuda.get_device_properties("cuda")}')
+
+        # ===== Preprocess and save to disk ===== #
+        run_preprocess(cfg)
+        data = load_data_dict(cfg)
+
+        # ===== HMR4D ===== #
+        if not Path(paths.hmr4d_results).exists():
+            Log.info("[HMR4D] Predicting")
+            model: DemoPL = hydra.utils.instantiate(cfg.model, _recursive_=False)
+            model.load_pretrained_model(cfg.ckpt_path)
+            model = model.eval().cuda()
+            tic = Log.sync_time()
+            pred = model.predict(data, static_cam=cfg.static_cam)
+            pred = detach_to_cpu(pred)
+            data_time = data["length"] / 30
+            Log.info(f"[HMR4D] Elapsed: {Log.sync_time() - tic:.2f}s for data-length={data_time:.1f}s")
+            torch.save(pred, paths.hmr4d_results)
+
+        # ===== Render ===== #
+        render_incam(cfg)
+        render_global(cfg)
+        if not Path(paths.incam_global_horiz_video).exists():
+            Log.info("[Merge Videos]")
+            merge_videos_horizontal([paths.incam_video, paths.global_video], paths.incam_global_horiz_video)
+
+    run_GPU_task()
     return
 
-    … (previous handler signature, not captured in this view)
+
+def handler(video_path, cam_status, GPU_quota, progress=gr.Progress()):
     # 0. Check validity of inputs.
     if cam_status not in ['Static Camera', 'Dynamic Camera']:
         raise gr.Error('Please define the camera status!', duration=5)
@@ -85,6 +90,10 @@ def handler(video_path, cam_status, progress=gr.Progress()):
     Log.info(f"[Input Args] is_static: {is_static}")
     Log.info(f"[Input Args] video_path: {video_path}")
 
+    if not is_static:
+        Log.info("[Warning] Dynamic Camera is not supported yet.")
+        raise gr.Error('DPVO is not supported in spaces yet. Try to run videos with static camera instead!', duration=20)
+
     # 2. Prepare cfg.
     Log.info(f"[Video]: {video_path}")
     demo_id = f'{Path(video_path).stem}_{np.random.randint(0, 1024):04d}'
@@ -93,7 +102,7 @@ def handler(video_path, cam_status, progress=gr.Progress()):
     # 3. Run demo.
     cfg = OmegaConf.to_container(cfg, resolve=True)
     cfg = OmegaConf.create(cfg)
-    run_demo(cfg, progress)
+    run_demo(cfg, progress, GPU_quota)
 
     # 4. Prepare the output.
     return cfg.paths.incam_video, cfg.paths.global_video
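The quota-adjust mechanism above relies on spaces.GPU(duration=...) fixing the duration at the moment the decorator is applied, so the decorated function is defined inside run_demo only after the user-supplied GPU_quota is known. A stripped-down sketch of the same pattern, assuming a ZeroGPU Space where the spaces package is available (heavy_gpu_work is illustrative):

# Minimal sketch of binding a user-chosen ZeroGPU duration at call time.
# Assumption: running on a ZeroGPU Space where the `spaces` package is installed.
import spaces
import torch

def run_with_quota(gpu_quota: int):
    @spaces.GPU(duration=gpu_quota)   # duration is fixed when the decorator is applied
    def heavy_gpu_work():
        # Illustrative GPU work; the real handler runs preprocessing, inference, rendering.
        x = torch.randn(1024, 1024, device='cuda')
        return (x @ x).sum().item()

    return heavy_gpu_work()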
examples/cxk.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca27a614c055ba2a971184e6878a9b6cef7b71bffe30bdcae1d811b4b28c3841
+size 935863
examples/tennis.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fb3c7170b4b1afcf1750e5b6506d97745a16841f855a7cfb3ec869601cc5447
+size 2116761
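The two added .mp4 entries are Git LFS pointer files rather than the video bytes themselves, which is why the .gitattributes change above adds *.mp4 to LFS tracking. A small sketch that reads the three pointer fields shown in these diffs (version, oid, size); the helper name is illustrative:

# Parse a Git LFS pointer file into its fields; assumes the key-value layout shown above.
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, 'r') as f:
        for line in f:
            key, _, value = line.strip().partition(' ')
            if key:
                fields[key] = value
    fields['size'] = int(fields.get('size', 0))
    return fields

# e.g. read_lfs_pointer('examples/cxk.mp4') -> {'version': '...', 'oid': 'sha256:...', 'size': 935863}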