How to use the dlib.get_frontal_face_detector function in dlib

To help you get started, we’ve selected a few dlib examples based on popular ways it is used in public projects.
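Before diving into the project snippets, here is a minimal sketch of the typical calling pattern (the image path is illustrative): construct the detector once, then call it on a grayscale or RGB image, passing the number of times to upsample.

import cv2
import dlib

# dlib's frontal face detector is a HOG + linear-SVM based detector
detector = dlib.get_frontal_face_detector()

img = cv2.imread('face.jpg')                  # illustrative file name
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # grayscale works; RGB does too
rects = detector(gray, 1)                     # 1 = upsample once before detecting
for rect in rects:
    print(rect.left(), rect.top(), rect.right(), rect.bottom())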

github charlielito / snapchat-filters-opencv / faceswap / Webcam_face_swapping.py (View on Github)
import sys
import time

import cv2
import dlib
from imutils import face_utils  # provides shape_to_np, used below
from utils import applyAffineTransform, rectContains, calculateDelaunayTriangles, warpTriangle, face_swap3

if __name__ == '__main__':

    # Make sure OpenCV is version 3.0 or above
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

    if int(major_ver) < 3:
        print('ERROR: Script needs OpenCV 3.0 or higher', file=sys.stderr)
        sys.exit(1)

    print("[INFO] loading facial landmark predictor...")
    model = "shape_predictor_68_face_landmarks.dat"
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(model)

    # Read the face image; this script swaps image1's face into image2
    # filename1 = 'ted_cruz.jpg'
    filename1 = 'brad.jpg'
    # filename1 = 'hillary_clinton.jpg'

    img1 = cv2.imread(filename1)
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale frame
    rects1 = detector(gray1, 0)
    shape1 = predictor(gray1, rects1[0])
    points1 = face_utils.shape_to_np(shape1)  # a (68, 2) NumPy array of landmark coordinates
    # convert to a list of (x, y) tuples
    points1 = list(map(tuple, points1))
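For reference, face_utils here comes from the imutils package; shape_to_np performs roughly the following conversion (a sketch, not the library's exact source):

import numpy as np

def shape_to_np(shape, dtype="int"):
    # copy each dlib landmark part into an (N, 2) array of (x, y) coordinates
    coords = np.zeros((shape.num_parts, 2), dtype=dtype)
    for i in range(shape.num_parts):
        coords[i] = (shape.part(i).x, shape.part(i).y)
    return coords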
github vaaaaanquish-xx / dlib_detection_python_script / detector_dlib.py (View on Github)
def facedetector_dlib(img, image_path):
    try:
        # dlib's standard face detector: the frontal_face_detector class
        # detector = dlib.simple_object_detector(svm)
        detector = dlib.get_frontal_face_detector()

        # Convert BGR to RGB (from OpenCV format to skimage format)
        # The official demo uses skimage, so that may be the better choice
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # run() returns rectangles, scores, and sub-detector indices
        # Raising the upsample_num argument makes the detector examine more
        # (upsampled) copies of the image: accuracy improves, but search time
        # and memory use grow accordingly
        # dets = detector(img_rgb, 0)
        dets, scores, idx = detector.run(img_rgb, 0)

        # rectangle color
        color = (0, 0, 255)
        s = ''
        if len(dets) > 0:
            # at least one face was detected
            for i, rect in enumerate(dets):
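                # Hypothetical continuation (the loop body is truncated in this
                # excerpt): draw each detection on the image and note its score.
                cv2.rectangle(img, (rect.left(), rect.top()),
                              (rect.right(), rect.bottom()), color, 2)
                s += '%d:%.2f ' % (i, scores[i])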
github jinfagang / alfred / alfred / modules / vision / face_extractor.py (View on Github)
def __init__(self):
    self.detector = dlib.get_frontal_face_detector()
    # self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
github joseph-zhong / LipReading / src / utils / data / face.py (View on Github)
def _getSharedDetector():
  global _detector
  if _detector is None:
    _detector = dlib.get_frontal_face_detector()
  return _detector
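A brief usage sketch (the caller below is hypothetical, not from the repository): the module-level cache means every call site shares one detector rather than paying dlib's construction cost on each frame.

def detect_faces(gray_frame):
  # reuse the lazily created shared detector
  rects = _getSharedDetector()(gray_frame, 1)
  return [(r.left(), r.top(), r.right(), r.bottom()) for r in rects]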
github Shade5 / FaceFitting / get_landmarks.py (View on Github)
import cv2
import dlib
import numpy as np
from MorphabelModel import MorphabelModel
import mesh

im = cv2.imread('data/female.jpg')
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
h, w, c = im.shape

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("models/shape_predictor_68_face_landmarks.dat")

rects = detector(gray, 1)
shape = predictor(gray, rects[0])
# keep just the first face's box as ((x1, y1), (x2, y2)) corner tuples
rects = [(rects[0].tl_corner().x, rects[0].tl_corner().y), (rects[0].br_corner().x, rects[0].br_corner().y)]
landmarks = np.zeros((68, 2))

for i, p in enumerate(shape.parts()):
    landmarks[i] = [p.x, p.y]
    im = cv2.circle(im, (p.x, p.y), radius=3, color=(0, 0, 255), thickness=5)

bfm = MorphabelModel('models/BFM.mat')
x = mesh.transform.from_image(landmarks, h, w)
X_ind = bfm.kpt_ind
github guoqiangqi / PFLD / euler_angles.py (View on Github)
        # These are the camera matrix values estimated on my webcam with
        # the calibration code (see: src/calibration):
        # camera_matrix = np.float32([[602.10618226,          0.0, 320.27333589],
        #                             [         0.0, 603.55869786,  229.7537026],
        #                             [         0.0,          0.0,          1.0]])

        # Distortion coefficients (all zeros: assume no lens distortion)
        self.camera_distortion = np.float32([0.0, 0.0, 0.0, 0.0, 0.0])
        # self.camera_distortion = np.float32([7.0834633684407095e-002, 6.9140193737175351e-002, 0.0, 0.0, -1.3073460323689292e+000])
        # Distortion coefficients estimated by calibration on my webcam:
        # camera_distortion = np.float32([0.06232237, -0.41559805, 0.00125389, -0.00402566, 0.04879263])

        if DEBUG:
            print("[DEEPGAZE] PnpHeadPoseEstimator: estimated camera matrix: \n" + str(self.camera_matrix) + "\n")

        # Declare the dlib face detector and shape predictor objects
        self._detector = dlib.get_frontal_face_detector()
        self._shape_predictor = dlib.shape_predictor(dlib_shape_predictor_file_path)
        # self.landmarks = []
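Downstream, an estimator like this one typically feeds the detected landmarks into OpenCV's PnP solver. The sketch below shows that generic recipe; the 3D model points and the 68-point landmark indices are the commonly used approximations, not values taken from this repository:

import cv2
import numpy as np

# approximate 3D positions (arbitrary units) of six facial landmarks
MODEL_POINTS = np.float32([
    (0.0, 0.0, 0.0),           # nose tip                (landmark 30)
    (0.0, -330.0, -65.0),      # chin                    (landmark 8)
    (-225.0, 170.0, -135.0),   # left eye, left corner   (landmark 36)
    (225.0, 170.0, -135.0),    # right eye, right corner (landmark 45)
    (-150.0, -150.0, -125.0),  # mouth, left corner      (landmark 48)
    (150.0, -150.0, -125.0),   # mouth, right corner     (landmark 54)
])

def head_pose(shape, camera_matrix, camera_distortion):
    # pair each model point with its detected 2D landmark
    image_points = np.float32([(shape.part(i).x, shape.part(i).y)
                               for i in (30, 8, 36, 45, 48, 54)])
    ok, rvec, tvec = cv2.solvePnP(MODEL_POINTS, image_points,
                                  camera_matrix, camera_distortion)
    return rvec, tvec  # cv2.Rodrigues(rvec)[0] yields the rotation matrix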
github singnet / face-services / services / face_detect_server.py (View on Github)
def get_detector(algorithm):
    cnn_face_detector_path = "models/mmod_human_face_detector.dat"
    cascade_path = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"

    face_detector = None
    if algorithm == 'haar_cascade':
        face_detector = cv2.CascadeClassifier(cascade_path)
    elif algorithm == 'dlib_hog':
        face_detector = dlib.get_frontal_face_detector()
    elif algorithm == 'dlib_cnn':
        face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detector_path)
    return face_detector
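Note that the three detectors this function can return are invoked differently. Below is a sketch of how a caller might normalize their outputs to (x, y, w, h) boxes; the helper is hypothetical, not part of the service:

def detect(face_detector, algorithm, gray):
    if algorithm == 'haar_cascade':
        # detectMultiScale returns an array of [x, y, w, h] rows
        return [tuple(box) for box in face_detector.detectMultiScale(gray, 1.1, 5)]
    elif algorithm == 'dlib_hog':
        # calling the HOG detector yields dlib.rectangle objects
        return [(r.left(), r.top(), r.width(), r.height())
                for r in face_detector(gray, 1)]
    elif algorithm == 'dlib_cnn':
        # the CNN detector yields mmod_rectangles, each with .rect and .confidence
        return [(d.rect.left(), d.rect.top(), d.rect.width(), d.rect.height())
                for d in face_detector(gray, 1)]
    return []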
github XiuweiHe / EmotionClassifier / src / image_emotion_rec.py (View on Github)
    faces = []
    if method == 'cv':
        detector = cv2.CascadeClassifier(detect_model_path)
        dets = detector.detectMultiScale(image, scaleFactor=1.1, minNeighbors=5, minSize=(48, 48))
        for face_coordinates in dets:
            x, y, w, h = face_coordinates
            x1, x2, y1, y2 = x - offset, x + w + offset, y - offset, y + h + offset
            face = image[y1:y2, x1:x2]
            try:
                faces.append(cv2.resize(face, face_size))
            except Exception:
                continue
        return faces, dets
    elif method == 'dlib':
        detector = dlib.get_frontal_face_detector()
        dets = detector(image, 1)
        rect = []
        for d in dets:
            left, right, top, bottom = d.left() - offset, d.right() + offset, d.top() - offset, d.bottom() + offset
            face = image[top:bottom, left:right]
            try:
                faces.append(cv2.resize(face, face_size))
                rect.append((d.left(), d.top(), d.right() - d.left(), d.bottom() - d.top()))
            except Exception:
                continue
        return faces, rect
def draw_bounding_box(face_coordinates, image_array, color):