def __init__(self, filename):
"""
Constructor for the face object. Initializes the keypoints, sets up some
data structures that keep track of the locations of the keypoints to help
later with interpolation, and computes barycentric coordinates
Parameters
----------
filename: string
Path to image file with at least one face in it
"""
self.img = dlib.load_rgb_image(filename)
self.getFaceKeypts()
self.tri = Delaunay(self.XKey)
X, Y = np.meshgrid(np.arange(self.img.shape[1]), np.arange(self.img.shape[0]))
XGrid = np.array([X.flatten(), Y.flatten()], dtype=float).T  # np.float was removed in NumPy >= 1.24
allidxs = self.tri.find_simplex(XGrid)
self.idxs = allidxs[allidxs > -1] # Simplex index for each grid point that falls inside the triangulation
XGrid = XGrid[allidxs > -1, :]
imgidx = np.arange(self.img.shape[0]*self.img.shape[1])
imgidx = imgidx[allidxs > -1]
self.imgidxi, self.imgidxj = np.unravel_index(imgidx, (self.img.shape[0], self.img.shape[1]))
colors = self.img[self.imgidxi, self.imgidxj, :]
self.colors = colors/255.0
self.pixx = np.arange(np.min(self.imgidxj), np.max(self.imgidxj)+1)
self.pixy = np.arange(np.min(self.imgidxi), np.max(self.imgidxi)+1)
self.grididxx, self.grididxy = np.meshgrid(self.pixx, self.pixy)
self.XGrid = XGrid
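# Illustrative sketch (not part of the original source): the docstring above
# mentions barycentric coordinates, and scipy's Delaunay object exposes the
# per-simplex affine transforms needed to compute them. The function name below
# is a placeholder; only tri.transform / tri.find_simplex are real scipy APIs.
import numpy as np

def barycentric_coords(tri, points, simplex_idxs):
    """Barycentric coordinates of each 2D point inside its containing simplex."""
    T_inv = tri.transform[simplex_idxs, :2, :]   # inverse affine part, shape (n, 2, 2)
    r = tri.transform[simplex_idxs, 2, :]        # per-simplex offset, shape (n, 2)
    b = np.einsum('nij,nj->ni', T_inv, points - r)
    return np.hstack([b, 1.0 - b.sum(axis=1, keepdims=True)])  # shape (n, 3)

# e.g. bary = barycentric_coords(self.tri, self.XGrid, self.idxs) inside the class above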
prewhitened = facenet.prewhiten(img)
img_list[0] = prewhitened
# Fixed normalization
controlArray = np.expand_dims(np.zeros(1, dtype=np.int32), 1)
controlArray += np.expand_dims(np.ones(1, dtype=np.int32), 1) * facenet.FIXED_STANDARDIZATION
# Run forward pass to calculate embeddings
feed_dict = {self.images_placeholder: img_list, self.phase_train_placeholder: False, self.control_placeholder: controlArray}
img_encoding = sess.run(self.embeddings, feed_dict=feed_dict)
if n_jitters:
imgEncodings = img_encoding
img = dlib.load_rgb_image(imgPath)
augmented_images = dlib.jitter_image(img, num_jitters=n_jitters)
for augmented_image in augmented_images:
prewhitened = facenet.prewhiten(augmented_image)
img_list[0] = prewhitened
# Run forward pass to calculate embeddings
feed_dict = {self.images_placeholder: img_list, self.phase_train_placeholder: False, self.control_placeholder: controlArray}
img_encoding = sess.run(self.embeddings, feed_dict=feed_dict)
imgEncodings = np.concatenate((imgEncodings, img_encoding), axis=0)
return np.average(imgEncodings, axis=0)
return img_encoding[0]
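# Illustrative sketch (not from the original source) of the kind of per-image
# normalization that facenet.prewhiten performs: standardize by the image's own
# mean and a floor-clamped standard deviation. prewhiten_sketch is a placeholder
# name, not the library function itself.
import numpy as np

def prewhiten_sketch(img):
    mean, std = img.mean(), img.std()
    std_adj = max(std, 1.0 / np.sqrt(img.size))  # guard against near-zero std
    return (img.astype(np.float64) - mean) / std_adj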
"""
num_images = len(images)
if verbose:
print(f"\nStarting face landmark detection...")
print(f"Processing {num_images} images.")
N = max(round(print_freq * num_images), 1)
# Look for face landmarks in each image
num_skips = 0
all_landmarks, all_faces = [], []
for n, (file, image) in enumerate(images.items()):
if verbose and n % N == 0:
print(f"({n + 1} / {num_images}): {file}")
# Try to detect a face in the image
imageForDlib = dlib.load_rgb_image(file) # Kludge for now
found_faces = detector(imageForDlib, 1)
# Skip images with no faces or with more than max_faces faces
if len(found_faces) == 0 or len(found_faces) > max_faces:
num_skips += 1
continue
# Find landmarks, save to CSV
for num, face in enumerate(found_faces):
landmarks = predictor(imageForDlib, face)
if not landmarks:
continue
# Add this image to be averaged later
all_faces.append(image)
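# Helper sketch (not in the original): convert the dlib full_object_detection
# returned by predictor() into a (num_parts, 2) NumPy array, which is the form
# the later averaging / CSV-export steps would work with. landmarks_to_array is
# a hypothetical name.
import numpy as np

def landmarks_to_array(landmarks):
    return np.array([[landmarks.part(i).x, landmarks.part(i).y]
                     for i in range(landmarks.num_parts)], dtype=np.float64)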
predictor_path = sys.argv[1]
face_rec_model_path = sys.argv[2]
faces_folder_path = sys.argv[3]
# Load all the models we need: a detector to find the faces, a shape predictor
# to find face landmarks so we can precisely localize the face, and finally the
# face recognition model.
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)
win = dlib.image_window()
# Now process all the images
for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
print("Processing file: {}".format(f))
img = dlib.load_rgb_image(f)
win.clear_overlay()
win.set_image(img)
# Ask the detector to find the bounding boxes of each face. The 1 in the
# second argument indicates that we should upsample the image 1 time. This
# will make everything bigger and allow us to detect more faces.
dets = detector(img, 1)
print("Number of faces detected: {}".format(len(dets)))
# Now process each face we found.
for k, d in enumerate(dets):
print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
k, d.left(), d.top(), d.right(), d.bottom()))
# Get the landmarks/parts for the face in box d.
shape = sp(img, d)
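# Sketch of the usual next step (hedged; it mirrors dlib's face_recognition
# example rather than code confirmed by this source): draw the landmarks, then
# map the face to a 128-dimensional descriptor that can be compared between
# images with Euclidean distance (dlib's documentation suggests a ~0.6
# same-person threshold).
win.add_overlay(shape)
face_descriptor = facerec.compute_face_descriptor(img, shape)
print(face_descriptor)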