File size: 2,619 Bytes
b39090b
d1ffd11
b39090b
d1ffd11
b39090b
1b9d9ae
 
b39090b
1b9d9ae
7702267
 
 
 
061fa24
 
 
 
b39090b
 
061fa24
 
59057f1
 
 
b39090b
59057f1
 
 
b39090b
59057f1
 
b39090b
59057f1
 
 
 
061fa24
59057f1
 
e70a1d4
59057f1
e70a1d4
b39090b
5990ce9
 
7702267
4225814
 
 
7702267
 
4225814
7702267
 
 
 
 
 
 
 
 
 
 
4225814
 
7702267
4225814
7702267
 
5990ce9
 
b39090b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
import numpy as np
import gradio as gr
import glob
import cv2
import matplotlib.pyplot as plt
import insightface
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image

def predict(image_in_video, image_in_img):
    """Route the captured or uploaded image to the face-swap pipeline.

    Parameters
    ----------
    image_in_video : str | None
        Filepath of the webcam capture (the component uses ``type="filepath"``).
    image_in_img : str | None
        Filepath of the uploaded image file.

    Returns
    -------
    numpy.ndarray
        RGB result image produced by ``swapi``.

    Raises
    ------
    gr.Error
        If neither input is provided.
    """
    # Identity comparison (PEP 8): gradio sends None for an empty component.
    if image_in_video is None and image_in_img is None:
        raise gr.Error("Please capture an image using the webcam or upload an image.")
    # Exactly one of the two is populated (the toggle clears the hidden one),
    # so `or` picks whichever filepath is present.
    image = image_in_video or image_in_img
    return swapi(image)

# Module-level model initialization: load the insightface detection/analysis
# pipeline and the face-swapper model once at import time so every request
# reuses them.
app = FaceAnalysis(name='buffalo_l')
# ctx_id=0 presumably selects the first GPU (falls back to CPU when absent);
# det_size is the detector input resolution — TODO confirm against the
# insightface FaceAnalysis docs.
app.prepare(ctx_id=0, det_size=(640, 640))
# NOTE(review): 'inswapper_128.onnx' must be present locally (or resolvable by
# insightface's model zoo) for this to succeed — verify deployment includes it.
swapper = insightface.model_zoo.get_model('inswapper_128.onnx')


def swapi(imagen):
    """Swap the first face found in *imagen* onto every face in background1.jpg.

    Parameters
    ----------
    imagen : str
        Filepath of the user-supplied image containing the source face.

    Returns
    -------
    numpy.ndarray
        The modified background image converted to RGB (suitable for gr.Image).

    Raises
    ------
    gr.Error
        If the input image cannot be read, no face is detected in it, or the
        background image is missing.
    """
    # Use the uploaded image to extract features
    img_user = cv2.imread(imagen)
    if img_user is None:
        # cv2.imread returns None (rather than raising) for unreadable paths.
        raise gr.Error("Could not read the provided image.")
    faces_user = app.get(img_user)
    if not faces_user:
        # Previously this fell through to an IndexError on faces_user[0].
        raise gr.Error("No face detected in the provided image.")

    # Use another image "background1" for modifications
    img_background = cv2.imread('background1.jpg')
    if img_background is None:
        raise gr.Error("Background image 'background1.jpg' is missing.")
    faces_background = app.get(img_background)

    # Use the first detected face in the user image as the swap source.
    source_face = faces_user[0]

    # Apply modifications to the "background1" image: replace each detected
    # face with the source face, compositing back into the full frame.
    res = img_background.copy()
    for face in faces_background:
        res = swapper.get(res, face, source_face, paste_back=True)

    # Convert from BGR (OpenCV's channel order) to RGB for display.
    res_rgb = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)

    return res_rgb



with gr.Blocks() as blocks:
    gr.Markdown("### Capture Image Using WebCam or Upload")

    with gr.Row():
        with gr.Column():
            # Radio selects which input component is visible; default is webcam.
            image_or_file_opt = gr.Radio(["webcam", "file"], value="webcam",
                                         label="How would you like to upload your image?")
            # Both components use type="filepath" so predict/swapi receive a path.
            image_in_video = gr.Image(source="webcam", type="filepath")
            image_in_img = gr.Image(source="upload", visible=False, type="filepath")

            # Show exactly one input component and clear both values so a stale
            # image from the hidden component can never be submitted.
            def toggle(choice):
                if choice == "webcam":
                    return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
                else:
                    return gr.update(visible=False, value=None), gr.update(visible=True, value=None)

            image_or_file_opt.change(fn=toggle, inputs=[image_or_file_opt],
                                     outputs=[image_in_video, image_in_img], queue=False, show_progress=False)
        with gr.Column():
            image_out = gr.Image()

    run_btn = gr.Button("Run")
    # BUGFIX: inputs were [image_in_img, image_in_video], swapped relative to
    # predict(image_in_video, image_in_img). The mistake was masked because
    # predict treats both arguments symmetrically, but the order now matches
    # the signature.
    run_btn.click(fn=predict, inputs=[image_in_video, image_in_img], outputs=[image_out])
    gr.Examples(fn=predict, examples=[], inputs=[image_in_video, image_in_img], outputs=[image_out])

# Enable gradio's request queue (serializes GPU-bound swap jobs), then start
# the server; debug=True surfaces tracebacks in the console.
blocks.queue()
blocks.launch(debug=True)