# Gradio demo: capture or upload a face image and run InsightFace face swapping.
import gradio as gr
import numpy as np
import cv2
import numpy as np
import os
import glob
import cv2
import matplotlib.pyplot as plt
import insightface
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image
# Load your trained model
#model = tf.keras.models.load_model('path_to_your_model.h5')
def predict_gender(image):
    """Classify a face image as "Male" or "Female" using the global `model`.

    NOTE(review): `model` is never loaded in this file (the load call on the
    line above is commented out), so calling this raises NameError — confirm
    where the trained model is supposed to come from.
    """
    # Convert the incoming RGB image to BGR and preprocess for the classifier.
    frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    frame = cv2.resize(frame, (224, 224))  # assumed model input size — TODO confirm
    frame = frame / 255.0  # scale pixel values into [0, 1]
    batch = np.expand_dims(frame, axis=0)  # add leading batch dimension
    score = model.predict(batch)
    # Single sigmoid output: values below 0.5 are treated as "Male".
    return "Male" if score[0] < 0.5 else "Female"
def predict(image_in_video, image_in_img):
    """Return whichever input image was provided.

    Args:
        image_in_video: filepath captured from the webcam component, or None.
        image_in_img: filepath from the upload component, or None.

    Returns:
        The webcam image if present, otherwise the uploaded image.

    Raises:
        gr.Error: if neither input was supplied.
    """
    # Use `is None` identity checks rather than `== None` (PEP 8).
    if image_in_video is None and image_in_img is None:
        raise gr.Error("Please capture an image using the webcam or upload an image.")
    image = image_in_video or image_in_img
    return image
# --- one-time model setup (runs at import time) ---

# Face detection/analysis bundle; ctx_id=0 selects the first device,
# det_size is the detector's working resolution.
app = FaceAnalysis(name='buffalo_l')
app.prepare(ctx_id=0, det_size=(640, 640))

# Face-swap model. `download` / `download_zip` take booleans; the original
# passed the string 'FALSE', which is truthy and would behave like True.
swapper = insightface.model_zoo.get_model('inswapper_128.onnx',
                                          download=False, download_zip=False)

# Smoke test on the bundled sample image: swap every detected face with the
# first detected face. (The original duplicated the source_face/bbox lines.)
img = ins_get_image('t1')
faces = app.get(img)
source_face = faces[0]
bbox = [int(b) for b in source_face['bbox']]  # integer pixel box of the source face
res = img.copy()
for face in faces:
    res = swapper.get(res, face, source_face, paste_back=True)
# --- Gradio UI: webcam/upload selector feeding the predict() passthrough ---
with gr.Blocks() as blocks:
    gr.Markdown("### Capture Image Using WebCam or Upload")
    with gr.Row():
        with gr.Column():
            image_or_file_opt = gr.Radio(["webcam", "file"], value="webcam",
                                         label="How would you like to upload your image?")
            image_in_video = gr.Image(source="webcam", type="filepath")
            image_in_img = gr.Image(source="upload", visible=False, type="filepath")

            # Update visibility based on selection, clearing the hidden widget's value.
            def toggle(choice):
                if choice == "webcam":
                    return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
                else:
                    return gr.update(visible=False, value=None), gr.update(visible=True, value=None)

            image_or_file_opt.change(fn=toggle, inputs=[image_or_file_opt],
                                     outputs=[image_in_video, image_in_img],
                                     queue=False, show_progress=False)
        with gr.Column():
            image_out = gr.Image()
    run_btn = gr.Button("Run")
    # Inputs ordered to match predict(image_in_video, image_in_img); the
    # original wired them in swapped order.
    run_btn.click(fn=predict, inputs=[image_in_video, image_in_img], outputs=[image_out])
    gr.Examples(fn=predict, examples=[], inputs=[image_in_video, image_in_img], outputs=[image_out])

blocks.queue()
blocks.launch()  # stray trailing '|' (paste artifact, a SyntaxError) removed