Spaces: Running on Zero
Commit 8f7fee0 · Parent: 4f8fecc
Move tensors to gpu (#3)
- Move tensors to gpu (bd169c9ebae85e188a23109f5b291937288fe8c4)
- Clean (e685dd2789c0d279c2373c6db1b1e8b788949385)
Co-authored-by: Apolinário from multimodal AI art <[email protected]>
- app.py +1 -1
- src/gradio_pipeline.py +7 -5
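
For context, this Space runs on ZeroGPU ("Running on Zero" above): CUDA is attached only while a function decorated with `@spaces.GPU` is executing, so tensors cached during CPU-side startup must be moved to `"cuda"` inside the call itself; that is what this commit does in `src/gradio_pipeline.py`. A minimal sketch of the pattern, assuming the standard `spaces` ZeroGPU decorator (the class name, method name, and tensor shape here are illustrative, not from this repo):

```python
import spaces  # Hugging Face ZeroGPU helper
import torch


class RetargetingPipeline:  # illustrative stand-in for GradioPipeline
    def __init__(self):
        # Startup runs on CPU; ZeroGPU only attaches a GPU per call,
        # so cached tensors begin life on the CPU.
        self.x_s_user = torch.zeros(1, 21, 3)  # shape is an assumption

    @spaces.GPU  # CUDA exists only while this function runs
    def retarget(self):
        # Mirror the commit: move cached tensors to the GPU at call time.
        x_s_user = self.x_s_user.to("cuda")
        return x_s_user.cpu()
```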
app.py CHANGED

```diff
@@ -96,7 +96,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     )
     gr.Markdown(load_description("assets/gradio_description_animation.md"))
     with gr.Row():
-        with gr.Accordion(open=True, label="Animation Options"):
+        with gr.Accordion(open=False, label="Animation Options"):
             with gr.Row():
                 flag_relative_input = gr.Checkbox(value=True, label="relative motion")
                 flag_do_crop_input = gr.Checkbox(value=True, label="do crop")
```
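
The `app.py` change only flips the accordion's default state: with `open=False`, the "Animation Options" panel now starts collapsed. A self-contained sketch of the same layout, trimmed down from the diff context above:

```python
import gradio as gr

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # open=False keeps the panel collapsed until the user expands it
    with gr.Accordion(open=False, label="Animation Options"):
        with gr.Row():
            flag_relative_input = gr.Checkbox(value=True, label="relative motion")
            flag_do_crop_input = gr.Checkbox(value=True, label="do crop")

if __name__ == "__main__":
    demo.launch()
```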
src/gradio_pipeline.py CHANGED

```diff
@@ -83,17 +83,19 @@ class GradioPipeline(LivePortraitPipeline):
                 duration=5
             )
         else:
+            x_s_user = self.x_s_user.to("cuda")
+            f_s_user = self.f_s_user.to("cuda")
             # ∆_eyes,i = R_eyes(x_s; c_s,eyes, c_d,eyes,i)
             combined_eye_ratio_tensor = self.live_portrait_wrapper.calc_combined_eye_ratio([[input_eye_ratio]], self.source_lmk_user)
-            eyes_delta = self.live_portrait_wrapper.retarget_eye(self.x_s_user, combined_eye_ratio_tensor)
+            eyes_delta = self.live_portrait_wrapper.retarget_eye(x_s_user, combined_eye_ratio_tensor)
             # ∆_lip,i = R_lip(x_s; c_s,lip, c_d,lip,i)
             combined_lip_ratio_tensor = self.live_portrait_wrapper.calc_combined_lip_ratio([[input_lip_ratio]], self.source_lmk_user)
-            lip_delta = self.live_portrait_wrapper.retarget_lip(self.x_s_user, combined_lip_ratio_tensor)
-            num_kp = self.x_s_user.shape[1]
+            lip_delta = self.live_portrait_wrapper.retarget_lip(x_s_user, combined_lip_ratio_tensor)
+            num_kp = x_s_user.shape[1]
             # default: use x_s
-            x_d_new = self.x_s_user + eyes_delta.reshape(-1, num_kp, 3) + lip_delta.reshape(-1, num_kp, 3)
+            x_d_new = x_s_user + eyes_delta.reshape(-1, num_kp, 3) + lip_delta.reshape(-1, num_kp, 3)
             # D(W(f_s; x_s, x′_d))
-            out = self.live_portrait_wrapper.warp_decode(self.f_s_user, self.x_s_user, x_d_new)
+            out = self.live_portrait_wrapper.warp_decode(f_s_user, x_s_user, x_d_new)
             out = self.live_portrait_wrapper.parse_output(out['out'])[0]
             out_to_ori_blend = paste_back(out, self.crop_M_c2o, self.img_rgb, self.mask_ori)
             # gr.Info("Run successfully!", duration=2)
```
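
Following the math in the diff's comments: `retarget_eye` and `retarget_lip` return flattened deltas, and the new driving keypoints are x′_d = x_s + ∆_eyes + ∆_lip, with each delta reshaped back to (batch, num_kp, 3) before the addition. A toy sketch of just that composition, assuming num_kp = 21 and using random tensors in place of the wrapper outputs:

```python
import torch

batch, num_kp = 1, 21                        # num_kp = x_s_user.shape[1] in the diff
x_s = torch.randn(batch, num_kp, 3)          # source implicit keypoints x_s
eyes_delta = torch.randn(batch, num_kp * 3)  # flattened ∆_eyes from retarget_eye
lip_delta = torch.randn(batch, num_kp * 3)   # flattened ∆_lip from retarget_lip

# x′_d = x_s + ∆_eyes + ∆_lip, restoring the (batch, num_kp, 3) layout first
x_d_new = x_s + eyes_delta.reshape(-1, num_kp, 3) + lip_delta.reshape(-1, num_kp, 3)
assert x_d_new.shape == (batch, num_kp, 3)
```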