from ultralyticsplus import YOLO
from PIL import Image
import numpy as np
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import (
    Convolution2D,
    LocallyConnected2D,
    MaxPooling2D,
    Flatten,
    Dense,
    Dropout,
)
import os
import zipfile
import gdown
import tensorflow as tf


def load_detector():
    # load the YOLOv8 face detection model
    model = YOLO('https://github.com/akanametov/yolov8-face/releases/download/v0.0.0/yolov8n-face.pt')

    # set model parameters
    model.overrides['conf'] = 0.25  # NMS confidence threshold
    model.overrides['iou'] = 0.45  # NMS IoU threshold
    model.overrides['agnostic_nms'] = False  # class-agnostic NMS
    model.overrides['max_det'] = 50  # maximum number of detections per image
    return model

def extract_faces(model, image):
    # run face detection; `image` is expected to be a path to an image file
    results = model.predict(image)
    # bounding boxes as integer (x1, y1, x2, y2) pixel coordinates
    boxes = results[0].boxes.xyxy.cpu().numpy().astype(np.int32)
    img = np.array(Image.open(image))
    crops = []
    for x1, y1, x2, y2 in boxes:
        crops.append(Image.fromarray(img[y1:y2, x1:x2]))
    return crops

def load_model(
    url="https://github.com/swghosh/DeepFace/releases/download/weights-vggface2-2d-aligned/VGGFace2_DeepFace_weights_val-0.9034.h5.zip",
):
    base_model = Sequential()
    base_model.add(
        Convolution2D(32, (11, 11), activation="relu", name="C1", input_shape=(152, 152, 3))
    )
    base_model.add(MaxPooling2D(pool_size=3, strides=2, padding="same", name="M2"))
    base_model.add(Convolution2D(16, (9, 9), activation="relu", name="C3"))
    base_model.add(LocallyConnected2D(16, (9, 9), activation="relu", name="L4"))
    base_model.add(LocallyConnected2D(16, (7, 7), strides=2, activation="relu", name="L5"))
    base_model.add(LocallyConnected2D(16, (5, 5), activation="relu", name="L6"))
    base_model.add(Flatten(name="F0"))
    base_model.add(Dense(4096, activation="relu", name="F7"))
    base_model.add(Dropout(rate=0.5, name="D0"))
    base_model.add(Dense(8631, activation="softmax", name="F8"))

    # ---------------------------------

    home = os.getcwd()
    weights_path = os.path.join(home, "VGGFace2_DeepFace_weights_val-0.9034.h5")

    if not os.path.isfile(weights_path):
        print("VGGFace2_DeepFace_weights_val-0.9034.h5 will be downloaded...")

        output = weights_path + ".zip"
        gdown.download(url, output, quiet=False)

        # unzip VGGFace2_DeepFace_weights_val-0.9034.h5.zip
        with zipfile.ZipFile(output, "r") as zip_ref:
            zip_ref.extractall(home)

    base_model.load_weights(weights_path)

    # drop F8 and D0. F7 is the representation layer.
    deepface_model = Model(inputs=base_model.layers[0].input, outputs=base_model.layers[-3].output)

    return deepface_model

def findCosineDistance(source_representation, test_representation):
    # cosine distance = 1 - cosine similarity between the two embedding vectors
    a = np.matmul(np.transpose(source_representation), test_representation)
    b = np.sum(np.multiply(source_representation, source_representation))
    c = np.sum(np.multiply(test_representation, test_representation))
    return 1 - (a / (np.sqrt(b) * np.sqrt(c)))

def get_embeddings(model, imgs):
    # resize each face crop to the model's 152x152 input size and return
    # one 4096-d representation (F7 output) per crop
    embeddings = []
    for img in imgs:
        img = np.expand_dims(np.array(img.resize((152, 152))), axis=0)
        embedding = model.predict(img, verbose=0)[0]
        embeddings.append(embedding)
    return embeddings
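

# Example usage: compare the first face found in two images.
# "img1.jpg" and "img2.jpg" are placeholder paths (each assumed to contain at
# least one face), and the 0.3 cosine-distance threshold is only a rough
# starting point that should be tuned for a given dataset.
if __name__ == "__main__":
    detector = load_detector()
    recognizer = load_model()

    # detect and crop faces, then embed the first face of each image
    faces_a = extract_faces(detector, "img1.jpg")
    faces_b = extract_faces(detector, "img2.jpg")
    emb_a = get_embeddings(recognizer, faces_a)[0]
    emb_b = get_embeddings(recognizer, faces_b)[0]

    distance = findCosineDistance(emb_a, emb_b)
    print(f"cosine distance: {distance:.4f}")
    print("same person" if distance < 0.3 else "different people")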