from ultralyticsplus import YOLO
from PIL import Image
import numpy as np
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import (
    Convolution2D,
    LocallyConnected2D,
    MaxPooling2D,
    Flatten,
    Dense,
    Dropout,
)
import os
import zipfile
import gdown
import tensorflow as tf


def load_detector():
    # load the YOLOv8 face detection model
    model = YOLO("https://github.com/akanametov/yolov8-face/releases/download/v0.0.0/yolov8n-face.pt")

    # set model parameters
    model.overrides["conf"] = 0.25  # NMS confidence threshold
    model.overrides["iou"] = 0.45  # NMS IoU threshold
    model.overrides["agnostic_nms"] = False  # NMS class-agnostic
    model.overrides["max_det"] = 50  # maximum number of detections per image

    return model


def extract_faces(model, image):
    # perform inference; `image` is expected to be a path to an image file
    results = model.predict(image)
    boxes = results[0].boxes.xyxy.cpu().numpy().astype(np.int32)

    # crop each detected face out of the original image (boxes are x1, y1, x2, y2)
    img = np.array(Image.open(image))
    crops = []
    for x1, y1, x2, y2 in boxes:
        crops.append(Image.fromarray(img[y1:y2, x1:x2]))

    return crops


def load_model(
    url="https://github.com/swghosh/DeepFace/releases/download/weights-vggface2-2d-aligned/VGGFace2_DeepFace_weights_val-0.9034.h5.zip",
):
    # DeepFace CNN architecture with VGGFace2 pre-trained weights
    base_model = Sequential()
    base_model.add(
        Convolution2D(32, (11, 11), activation="relu", name="C1", input_shape=(152, 152, 3))
    )
    base_model.add(MaxPooling2D(pool_size=3, strides=2, padding="same", name="M2"))
    base_model.add(Convolution2D(16, (9, 9), activation="relu", name="C3"))
    base_model.add(LocallyConnected2D(16, (9, 9), activation="relu", name="L4"))
    base_model.add(LocallyConnected2D(16, (7, 7), strides=2, activation="relu", name="L5"))
    base_model.add(LocallyConnected2D(16, (5, 5), activation="relu", name="L6"))
    base_model.add(Flatten(name="F0"))
    base_model.add(Dense(4096, activation="relu", name="F7"))
    base_model.add(Dropout(rate=0.5, name="D0"))
    base_model.add(Dense(8631, activation="softmax", name="F8"))

    # ---------------------------------
    # download and extract the pre-trained weights if they are not cached locally
    home = os.getcwd()
    weights_path = os.path.join(home, "VGGFace2_DeepFace_weights_val-0.9034.h5")

    if not os.path.isfile(weights_path):
        print("VGGFace2_DeepFace_weights_val-0.9034.h5 will be downloaded...")

        output = weights_path + ".zip"
        gdown.download(url, output, quiet=False)

        # unzip VGGFace2_DeepFace_weights_val-0.9034.h5.zip
        with zipfile.ZipFile(output, "r") as zip_ref:
            zip_ref.extractall(home)

    base_model.load_weights(weights_path)

    # drop F8 and D0. F7 is the representation layer.
    deepface_model = Model(inputs=base_model.layers[0].input, outputs=base_model.layers[-3].output)

    return deepface_model


def findCosineDistance(source_representation, test_representation):
    # cosine distance = 1 - cosine similarity between the two embedding vectors
    a = np.matmul(np.transpose(source_representation), test_representation)
    b = np.sum(np.multiply(source_representation, source_representation))
    c = np.sum(np.multiply(test_representation, test_representation))
    return 1 - (a / (np.sqrt(b) * np.sqrt(c)))


def get_embeddings(model, imgs):
    # resize each face crop to the model's 152x152 input and take the F7 activations
    embeddings = []
    for img in imgs:
        img = np.expand_dims(np.array(img.resize((152, 152))), axis=0)
        embedding = model.predict(img, verbose=0)[0]
        embeddings.append(embedding)
    return embeddings
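

# Example usage (a minimal sketch tying the functions above together): the image
# paths "img1.jpg" / "img2.jpg" and the 0.40 cosine-distance threshold are
# illustrative assumptions, not values defined elsewhere in this file; tune the
# threshold against your own verification data.
if __name__ == "__main__":
    detector = load_detector()
    embedder = load_model()

    # detect faces and keep the first crop from each image
    faces1 = extract_faces(detector, "img1.jpg")
    faces2 = extract_faces(detector, "img2.jpg")

    if faces1 and faces2:
        emb1 = get_embeddings(embedder, faces1[:1])[0]
        emb2 = get_embeddings(embedder, faces2[:1])[0]

        distance = findCosineDistance(emb1, emb2)
        print(f"cosine distance: {distance:.4f}")
        print("same person" if distance < 0.40 else "different people")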