# swap_face / app.py
# Source: Hugging Face Space by sandrocalzada — "Update app.py", commit 59057f1 (2.62 kB)
import numpy as np
import gradio as gr
import glob
import cv2
import matplotlib.pyplot as plt
import insightface
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image
def predict(image_in_video, image_in_img):
    """Run the face swap on whichever image source the user provided.

    Args:
        image_in_video: Filepath of a webcam capture, or None.
        image_in_img: Filepath of an uploaded image, or None.

    Returns:
        The swapped image (RGB array) produced by ``swapi``.

    Raises:
        gr.Error: If neither input was provided.
    """
    # Identity comparison with None per PEP 8 (was `== None`).
    if image_in_video is None and image_in_img is None:
        raise gr.Error("Please capture an image using the webcam or upload an image.")
    # The UI toggle clears the hidden component, so at most one input is non-None.
    image = image_in_video or image_in_img
    return swapi(image)
# Module-level model setup — runs once at import time.
# Face detection/analysis using the InsightFace "buffalo_l" model pack.
app = FaceAnalysis(name='buffalo_l')
# ctx_id=0 selects the first compute device (GPU 0 if available — TODO confirm
# fallback behavior on CPU-only hosts); 640x640 detector input size.
app.prepare(ctx_id=0, det_size=(640, 640))
# Face-swapping model; expects 'inswapper_128.onnx' to be available locally
# or in the InsightFace model cache.
swapper = insightface.model_zoo.get_model('inswapper_128.onnx')
def swapi(imagen):
    """Swap the first face found in the user's image onto every face in
    the local 'background1.jpg' image.

    Args:
        imagen: Filepath of the user's image; must contain at least one
            detectable face.

    Returns:
        The modified background image as an RGB numpy array.

    Raises:
        gr.Error: If the input image cannot be read or no face is detected
            in it (previously these cases crashed with AttributeError /
            IndexError).
    """
    # Use the uploaded image to extract the source face.
    img_user = cv2.imread(imagen)
    if img_user is None:
        # cv2.imread returns None (no exception) on an unreadable path.
        raise gr.Error("Could not read the provided image file.")
    faces_user = app.get(img_user)
    if not faces_user:
        # Guard: indexing [0] on an empty list would raise IndexError.
        raise gr.Error("No face was detected in the provided image.")
    source_face = faces_user[0]

    # Use another image "background1" as the target for modifications.
    img_background = cv2.imread('background1.jpg')
    if img_background is None:
        raise gr.Error("Background image 'background1.jpg' is missing or unreadable.")
    faces_background = app.get(img_background)

    # Paste the user's face onto every face found in the background.
    res = img_background.copy()
    for face in faces_background:
        res = swapper.get(res, face, source_face, paste_back=True)

    # OpenCV loads images as BGR; Gradio expects RGB for display.
    return cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
# Gradio UI: radio toggle between webcam capture and file upload, result panel.
with gr.Blocks() as blocks:
    gr.Markdown("### Capture Image Using WebCam or Upload")
    with gr.Row():
        with gr.Column():
            image_or_file_opt = gr.Radio(["webcam", "file"], value="webcam",
                                         label="How would you like to upload your image?")
            image_in_video = gr.Image(source="webcam", type="filepath")
            image_in_img = gr.Image(source="upload", visible=False, type="filepath")

            def toggle(choice):
                """Show the component matching the selected source; hide and clear the other."""
                if choice == "webcam":
                    return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
                else:
                    return gr.update(visible=False, value=None), gr.update(visible=True, value=None)

            image_or_file_opt.change(fn=toggle, inputs=[image_or_file_opt],
                                     outputs=[image_in_video, image_in_img],
                                     queue=False, show_progress=False)
        with gr.Column():
            image_out = gr.Image()
    run_btn = gr.Button("Run")
    # BUG FIX: inputs were passed as [image_in_img, image_in_video] — the
    # reverse of predict(image_in_video, image_in_img). It only worked because
    # predict picks whichever value is non-None; pass them in signature order.
    run_btn.click(fn=predict, inputs=[image_in_video, image_in_img], outputs=[image_out])
    gr.Examples(fn=predict, examples=[], inputs=[image_in_video, image_in_img], outputs=[image_out])

blocks.queue()
blocks.launch(debug=True)