import face_recognition
import cv2
import numpy as np
import os
import pickle
# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the
# other example, but it includes some basic performance tweaks to make things run a lot faster:
#   1. Process each video frame at 1/4 resolution (though still display it at full resolution)
#   2. Only detect faces in every other frame of video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
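# For example, a face found at (top, right, bottom, left) = (50, 120, 100, 70) in the 1/4-scale
# frame corresponds to (200, 480, 400, 280) in the full-resolution frame, which is why the drawing
# code near the bottom multiplies every coordinate by 4 before rendering boxes and labels.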
def get_emb(file_name):
    """Return the 128-d face embedding for an image, caching it next to the image as a .npy file."""
    if os.path.exists(file_name):
        file_ = face_recognition.load_image_file(file_name)
        emb = face_recognition.face_encodings(file_)[0]
        np.save(file_name.replace(".jpg", '.npy'), emb)
    else:
        # The image is gone, so fall back to the cached embedding saved on an earlier run.
        emb = np.load(file_name.replace(".jpg", '.npy'))
    return emb
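# Usage sketch (the image name is just an example): the first call encodes the face and caches it,
# and the cached file is used if the .jpg is later removed.
# emb = get_emb("obama.jpg")   # writes obama.npy alongside the image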

def input_an_image(image_file, person_name, ori_img_dir='images/ori_images', img_emb_dir='images/img_emb'):
    """Encode one image of a person and store the embedding under img_emb_dir/person_name."""
    image_file_dir = os.path.join(ori_img_dir, person_name)
    emb_file_dir = os.path.join(img_emb_dir, person_name)
    if not os.path.exists(emb_file_dir):
        # makedirs (rather than mkdir) also creates the images/ parent directories on a fresh run
        os.makedirs(image_file_dir, exist_ok=True)
        os.makedirs(emb_file_dir)
        file_ind = 0
    else:
        # Count the embeddings already stored for this person so a new file never overwrites an old one
        file_ind = len(os.listdir(emb_file_dir))
    file_ = face_recognition.load_image_file(image_file)
    emb = face_recognition.face_encodings(file_)[0]
    emb_file = image_file.split('.')[0] + f'_{file_ind}.npy'
    emb_file_out_path = os.path.join(emb_file_dir, emb_file)
    np.save(emb_file_out_path, emb)
    return emb
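# Usage sketch (person name and file are placeholders):
# input_an_image('alice.jpg', 'alice')   # writes images/img_emb/alice/alice_0.npy
# Note that only the embedding is stored; the original image is not copied into images/ori_images.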

def init_load_embs(img_emb_dir='images/img_emb'):
    """Load every cached embedding and map a running index to its person name and vector."""
    os.makedirs(img_emb_dir, exist_ok=True)  # tolerate a fresh checkout with no embeddings yet
    persons = os.listdir(img_emb_dir)
    i = 0
    ind2person = dict()
    for oneperson in persons:
        oneperson_dir = os.path.join(img_emb_dir, oneperson)
        oneperson_list = os.listdir(oneperson_dir)
        for oneperson_j in oneperson_list:
            emb_id = i
            i += 1
            emb = np.load(os.path.join(oneperson_dir, oneperson_j))
            ind2person[emb_id] = dict(person=oneperson, emb=emb)
    return ind2person
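# The returned dict keeps insertion order, e.g. (hypothetical layout)
# images/img_emb/alice/alice_0.npy and images/img_emb/bob/bob_0.npy become
#     {0: {'person': 'alice', 'emb': array([...])}, 1: {'person': 'bob', 'emb': array([...])}}
# so index k here lines up with index k of known_face_encodings built in the main block below.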

if __name__ == "__main__":
    ind2person = init_load_embs()
    # Get a reference to webcam #0 (the default one)
    video_capture = cv2.VideoCapture(0)
    # Register one example image at startup (the file name is specific to this demo)
    emb = input_an_image('youpeng.jpg', "youpeng")
    ind2person[len(ind2person)] = dict(person="youpeng", emb=emb)
    # img_emb_dir = 'images/img_emb'
    # ori_img_dir = 'images/ori_images'
    # if not os.path.exists(img_emb_dir):
    #     os.mkdir(img_emb_dir)
    # if not os.path.exists(ori_img_dir):
    #     os.mkdir(ori_img_dir)
    # # os.listdir()
    # Load a sample picture and learn how to recognize it.
    # file_list = ["obama.jpg", "biden.jpg", "mengqi.jpg", "xinyi.jpg", "sixian.jpg", "wang.jpg", "chenmengqi.jpg", 'yilin.jpg', 'youpeng.jpg', 'wangyibo.jpg']
    # Create arrays of known face encodings and their names
    # known_face_encodings = [
    #     obama_face_encoding,
    #     biden_face_encoding,
    #     me_face_encoding,
    #     wang_face_encoding
    # ]
    # known_face_names = [
    #     "Barack Obama",
    #     "Joe Biden",
    #     "me",
    #     "wang"
    # ]
    known_face_encodings = [v['emb'] for k, v in ind2person.items()]
    # known_face_encodings = [get_emb(f) for f in file_list]
    # known_face_names = [st.replace('.jpg', '') for st in file_list]
    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()
        # Only process every other frame of video to save time
        if process_this_frame:
            # Resize frame of video to 1/4 size for faster face recognition processing
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses);
            # cvtColor also returns a contiguous array, which newer dlib builds require
            rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame, number_of_times_to_upsample=1)  # , model="cnn")
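            # Note: the commented-out model="cnn" switches to dlib's CNN face detector, which is more
            # accurate than the default HOG model but is much slower without a GPU.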
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
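                # compare_faces accepts a tolerance argument (the library default is 0.6; lower is
                # stricter). A sketch of a stricter comparison, should false positives show up:
                # matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.5)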
name = "Unknown" | |
# # If a match was found in known_face_encodings, just use the first one. | |
# if True in matches: | |
# first_match_index = matches.index(True) | |
# name = known_face_names[first_match_index] | |
# Or instead, use the known face with the smallest distance to the new face | |
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding) | |
best_match_index = np.argmin(face_distances) | |
if matches[best_match_index]: | |
# name = known_face_names[best_match_index] | |
name = ind2person[best_match_index]['person'] | |
face_names.append(name) | |
        process_this_frame = not process_this_frame
        # Display the results
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        # Display the resulting image
        cv2.imshow('Video', frame)
        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()