victorisgeek committed
Commit 9157b98 • 1 Parent(s): 9fdfbbe
Upload app.py
app.py
CHANGED
@@ -26,7 +26,7 @@ from utils import trim_video, StreamerThread, ProcessBar, open_directory, split_

 ## ------------------------------ USER ARGS ------------------------------

-parser = argparse.ArgumentParser(description="
+parser = argparse.ArgumentParser(description="Free Face Swapper")
 parser.add_argument("--out_dir", help="Default Output directory", default=os.getcwd())
 parser.add_argument("--batch_size", help="Gpu batch size", default=32)
 parser.add_argument("--cuda", action="store_true", help="Enable cuda", default=False)
@@ -189,21 +189,21 @@ def process(



-yield "### \n
+yield "### \n Loading face analyser model...", *ui_before()
 load_face_analyser_model()

-yield "### \n
+yield "### \n Loading face swapper model...", *ui_before()
 load_face_swapper_model()

 if face_enhancer_name != "NONE":
 if face_enhancer_name not in cv2_interpolations:
-yield f"### \n
+yield f"### \n Loading {face_enhancer_name} model...", *ui_before()
 FACE_ENHANCER = load_face_enhancer_model(name=face_enhancer_name, device=device)
 else:
 FACE_ENHANCER = None

 if enable_face_parser:
-yield "### \n
+yield "### \n Loading face parsing model...", *ui_before()
 load_face_parser_model()

 includes = mask_regions_to_list(mask_includes)
@@ -221,7 +221,7 @@ def process(
 ## ------------------------------ CONTENT CHECK ------------------------------


-yield "### \n
+yield "### \n Analysing face data...", *ui_before()
 if condition != "Specific Face":
 source_data = source_path, age
 else:
@@ -237,7 +237,7 @@ def process(

 ## ------------------------------ SWAP FUNC ------------------------------

-yield "### \n
+yield "### \n Generating faces...", *ui_before()
 preds = []
 matrs = []
 count = 0
@@ -251,13 +251,13 @@ def process(
 if USE_CUDA:
 image_grid = create_image_grid(batch_pred, size=128)
 PREVIEW = image_grid[:, :, ::-1]
-yield f"### \n
+yield f"### \n Generating face Batch {count}", *ui_before()

 ## ------------------------------ FACE ENHANCEMENT ------------------------------

 generated_len = len(preds)
 if face_enhancer_name != "NONE":
-yield f"### \n
+yield f"### \n Upscaling faces with {face_enhancer_name}...", *ui_before()
 for idx, pred in tqdm(enumerate(preds), total=generated_len, desc=f"Upscaling with {face_enhancer_name}"):
 enhancer_model, enhancer_model_runner = FACE_ENHANCER
 pred = enhancer_model_runner(pred, enhancer_model)
@@ -267,7 +267,7 @@ def process(
 ## ------------------------------ FACE PARSING ------------------------------

 if enable_face_parser:
-yield "### \n
+yield "### \n Face-parsing mask...", *ui_before()
 masks = []
 count = 0
 for batch_mask in get_parsed_mask(FACE_PARSER, preds, classes=includes, device=device, batch_size=BATCH_SIZE, softness=int(mask_soft_iterations)):
@@ -278,7 +278,7 @@ def process(
 if len(batch_mask) > 1:
 image_grid = create_image_grid(batch_mask, size=128)
 PREVIEW = image_grid[:, :, ::-1]
-yield f"### \n
+yield f"### \n Face parsing Batch {count}", *ui_before()
 masks = np.concatenate(masks, axis=0) if len(masks) >= 1 else masks
 else:
 masks = [None] * generated_len
@@ -294,7 +294,7 @@ def process(

 ## ------------------------------ PASTE-BACK ------------------------------

-yield "### \n
+yield "### \n Pasting back...", *ui_before()
 def post_process(frame_idx, frame_img, split_preds, split_matrs, split_masks, enable_laplacian_blend, crop_mask, blur_amount, erode_amount):
 whole_img_path = frame_img
 whole_img = cv2.imread(whole_img_path)
@@ -350,7 +350,7 @@ def process(
 temp_path = os.path.join(output_path, output_name, "sequence")
 os.makedirs(temp_path, exist_ok=True)

-yield "### \n
+yield "### \n Extracting video frames...", *ui_before()
 image_sequence = []
 cap = cv2.VideoCapture(video_path)
 curr_idx = 0
@@ -367,12 +367,12 @@ def process(
 for info_update in swap_process(image_sequence):
 yield info_update

-yield "### \n
+yield "### \n Merging sequence...", *ui_before()
 output_video_path = os.path.join(output_path, output_name + ".mp4")
 merge_img_sequence_from_ref(video_path, image_sequence, output_video_path)

 if os.path.exists(temp_path) and not keep_output_sequence:
-yield "### \n
+yield "### \n Removing temporary files...", *ui_before()
 shutil.rmtree(temp_path)

 WORKSPACE = output_path
@@ -490,7 +490,7 @@ def video_changed(video_path):


 def analyse_settings_changed(detect_condition, detection_size, detection_threshold):
-yield "### \n
+yield "### \n Applying new values..."
 global FACE_ANALYSER
 global DETECT_CONDITION
 DETECT_CONDITION = detect_condition
@@ -526,14 +526,14 @@ def slider_changed(show_frame, video_path, frame_index):


 def trim_and_reload(video_path, output_path, output_name, start_frame, stop_frame):
-yield video_path, f"### \n
+yield video_path, f"### \n Trimming video frame {start_frame} to {stop_frame}..."
 try:
 output_path = os.path.join(output_path, output_name)
 trimmed_video = trim_video(video_path, output_path, start_frame, stop_frame)
 yield trimmed_video, "### \n Video trimmed and reloaded."
 except Exception as e:
 print(e)
-yield video_path, "### \n
+yield video_path, "### \n Video trimming failed. See console for more info."


 ## ------------------------------ GRADIO GUI ------------------------------
@@ -542,13 +542,12 @@ css = """
 footer{display:none !important}
 """

-with gr.Blocks(
-gr.Markdown("#
-gr.Markdown("### insightface inswapper bypass NSFW.")
+with gr.Blocks(css=css) as interface:
+gr.Markdown("# FaceSwap with Enhnacer")
 with gr.Row():
 with gr.Row():
 with gr.Column(scale=0.4):
-with gr.Tab("
+with gr.Tab("Swap Condition"):
 swap_option = gr.Dropdown(
 swap_options_list,
 info="Choose which face or faces in the target image to swap.",
@@ -561,7 +560,7 @@ with gr.Blocks(theme='gradio_theme_darkmode_grey_red') as interface:
 value=25, label="Value", interactive=True, visible=False
 )

-with gr.Tab("
+with gr.Tab("Detection Settings"):
 detect_condition_dropdown = gr.Dropdown(
 detect_conditions,
 label="Condition",
@@ -579,7 +578,7 @@ with gr.Blocks(theme='gradio_theme_darkmode_grey_red') as interface:
 )
 apply_detection_settings = gr.Button("Apply settings")

-with gr.Tab("
+with gr.Tab("Output Settings"):
 output_directory = gr.Text(
 label="Output Directory",
 value=DEF_OUTPUT_PATH,
@@ -592,7 +591,7 @@ with gr.Blocks(theme='gradio_theme_darkmode_grey_red') as interface:
 label="Keep output sequence", value=False, interactive=True
 )

-with gr.Tab("
+with gr.Tab("Other Settings"):
 face_scale = gr.Slider(
 label="Face Scale",
 minimum=0,
@@ -671,7 +670,7 @@ with gr.Blocks(theme='gradio_theme_darkmode_grey_red') as interface:
 label="Source face", type="filepath", interactive=True
 )

-with gr.
+with gr.Group(visible=False) as specific_face:
 for i in range(NUM_OF_SRC_SPECIFIC):
 idx = i + 1
 code = "\n"
@@ -697,17 +696,17 @@ with gr.Blocks(theme='gradio_theme_darkmode_grey_red') as interface:
 value="Image",
 )

-with gr.
+with gr.Group(visible=True) as input_image_group:
 image_input = gr.Image(
 label="Target Image", interactive=True, type="filepath"
 )

-with gr.
+with gr.Group(visible=False) as input_video_group:
 vid_widget = gr.Video if USE_COLAB else gr.Text
 video_input = gr.Video(
 label="Target Video", interactive=True
 )
-with gr.Accordion("
+with gr.Accordion("Trim video", open=False):
 with gr.Column():
 with gr.Row():
 set_slider_range_btn = gr.Button(
@@ -747,15 +746,15 @@ with gr.Blocks(theme='gradio_theme_darkmode_grey_red') as interface:
 "Trim and Reload", interactive=True
 )

-with gr.
+with gr.Group(visible=False) as input_directory_group:
 direc_input = gr.Text(label="Path", interactive=True)

 with gr.Column(scale=0.6):
 info = gr.Markdown(value="...")

 with gr.Row():
-swap_button = gr.Button("
-cancel_button = gr.Button("
+swap_button = gr.Button("Swap", variant="primary")
+cancel_button = gr.Button("Cancel")

 preview_image = gr.Image(label="Output", interactive=False)
 preview_video = gr.Video(
@@ -764,29 +763,18 @@ with gr.Blocks(theme='gradio_theme_darkmode_grey_red') as interface:

 with gr.Row():
 output_directory_button = gr.Button(
-"
+"", interactive=False, visible=False
 )
 output_video_button = gr.Button(
-"
+"", interactive=False, visible=False
 )

-with gr.
+with gr.Group():
 with gr.Row():
 gr.Markdown(
-"### [
-)
-gr.Markdown(
-"### [Source code](https://huggingface.co/spaces/victorisgeek/SwapFace2Pon)"
-)
-gr.Markdown(
-"### [Playground](https://huggingface.co/spaces/victorisgeek/SwapFace2Pon)"
-)
-gr.Markdown(
-"### [Run in Colab](https://colab.research.google.com/github/victorgeel/FaceSwapNoNfsw/blob/main/SwapFace.ipynb)"
-)
-gr.Markdown(
-"### [Modified Version](https://github.com/victorgeel/FaceSwapNoNfsw)"
+"### [Welcome to my GitHub](https://github.com/victorgeel)"
 )
+

 ## ------------------------------ GRADIO EVENTS ------------------------------

@@ -878,7 +866,8 @@ with gr.Blocks(theme='gradio_theme_darkmode_grey_red') as interface:
 swap_event = swap_button.click(
 fn=process, inputs=swap_inputs, outputs=swap_outputs, show_progress=True
 )
-
+
+
 cancel_button.click(
 fn=stop_running,
 inputs=None,
@@ -891,6 +880,7 @@ with gr.Blocks(theme='gradio_theme_darkmode_grey_red') as interface:
 end_frame_event,
 ],
 show_progress=True,
+
 )
 output_directory_button.click(
 lambda: open_directory(path=WORKSPACE), inputs=None, outputs=None
@@ -903,4 +893,5 @@ if __name__ == "__main__":
 if USE_COLAB:
 print("Running in colab mode")

-
+
+interface.launch(share=USE_COLAB, max_threads=10)