| field | value | date |
|---|---|---|
| author | Jules Laplace <julescarbon@gmail.com> | 2018-12-15 16:41:07 +0100 |
| committer | Jules Laplace <julescarbon@gmail.com> | 2018-12-15 16:41:07 +0100 |
| commit | c5b02ffab8d388e8a2925e51736b902a48a95e71 (patch) | |
| tree | 694ede9e97c667ca4fdda8ccccad3676bccd3479 /megapixels/app/processors | |
| parent | 485cf0e4665c660d4e5e1fba00a95bc8036809c6 (diff) | |
| parent | 1690cfb4cc7b7277afca4016c295927cc4f7fafb (diff) | |
Merge branch 'master' of github.com:adamhrv/megapixels_dev
Diffstat (limited to 'megapixels/app/processors')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | megapixels/app/processors/face_detector.py | 48 |
| -rw-r--r-- | megapixels/app/processors/face_pose.py | 148 |
| -rw-r--r-- | megapixels/app/processors/face_recognition.py | 29 |
3 files changed, 115 insertions, 110 deletions
```diff
diff --git a/megapixels/app/processors/face_detector.py b/megapixels/app/processors/face_detector.py
index 593e9feb..3a90c557 100644
--- a/megapixels/app/processors/face_detector.py
+++ b/megapixels/app/processors/face_detector.py
@@ -24,15 +24,15 @@ class DetectorMTCNN:
     from mtcnn.mtcnn import MTCNN
     self.detector = MTCNN()
 
-  def detect(self, im, opt_size=(400,400), opt_conf_thresh=None, opt_pyramids=None, opt_largest=False):
+  def detect(self, im, size=(400,400), conf_thresh=None, pyramids=None, largest=False):
     '''Detects face using MTCNN and returns (list) of BBox
     :param im: (numpy.ndarray) image
     :returns list of BBox
     '''
     bboxes = []
-    #conf_thresh = self.conf_thresh if opt_conf_thresh is None else opt_conf_thresh
-    #pyramids = self.pyramids if opt_pyramids is None else opt_pyramids
-    dnn_size = self.dnn_size if opt_size is None else opt_size
+    #conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
+    #pyramids = self.pyramids if pyramids is None else pyramids
+    dnn_size = self.dnn_size if size is None else size
 
     im = im_utils.resize(im, width=dnn_size[0], height=dnn_size[1])
     dim = im.shape[:2][::-1]
@@ -43,7 +43,7 @@ class DetectorMTCNN:
       bbox = BBox.from_xywh_dim(*rect, dim)
       bboxes.append(bbox)
 
-    if opt_largest and len(bboxes) > 1:
+    if largest and len(bboxes) > 1:
       # only keep largest
       bboxes.sort(key=operator.attrgetter('area'), reverse=True)
       bboxes = [bboxes[0]]
@@ -70,34 +70,33 @@ class DetectorDLIBCNN:
   pyramids = 0
   conf_thresh = 0.85
 
-  def __init__(self, opt_gpu=0):
+  def __init__(self, gpu=0):
     import dlib
     self.log = logger_utils.Logger.getLogger()
     cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', '')
-    os.environ['CUDA_VISIBLE_DEVICES'] = str(opt_gpu)
+    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
     self.log.info('load model: {}'.format(cfg.DIR_MODELS_DLIB_CNN))
     self.detector = dlib.cnn_face_detection_model_v1(cfg.DIR_MODELS_DLIB_CNN)
     os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices  # reset
 
-  def detect(self, im, opt_size=None, opt_conf_thresh=None, opt_pyramids=None, opt_largest=False):
+  def detect(self, im, size=None, conf_thresh=None, pyramids=None, largest=False):
     bboxes = []
-    conf_thresh = self.conf_thresh if opt_conf_thresh is None else opt_conf_thresh
-    pyramids = self.pyramids if opt_pyramids is None else opt_pyramids
-    dnn_size = self.dnn_size if opt_size is None else opt_size
+    conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
+    pyramids = self.pyramids if pyramids is None else pyramids
+    dnn_size = self.dnn_size if size is None else size
     # resize image
     im = im_utils.resize(im, width=dnn_size[0], height=dnn_size[1])
     dim = im.shape[:2][::-1]
     im = im_utils.bgr2rgb(im)  # convert to RGB for dlib
     # run detector
-    mmod_rects = self.detector(im, opt_pyramids)
+    mmod_rects = self.detector(im, pyramids)
     # sort results
     for mmod_rect in mmod_rects:
-      self.log.debug('conf: {}, this: {}'.format(conf_thresh, mmod_rect.confidence))
       if mmod_rect.confidence > conf_thresh:
         bbox = BBox.from_dlib_dim(mmod_rect.rect, dim)
         bboxes.append(bbox)
 
-    if opt_largest and len(bboxes) > 1:
+    if largest and len(bboxes) > 1:
       # only keep largest
       bboxes.sort(key=operator.attrgetter('area'), reverse=True)
       bboxes = [bboxes[0]]
@@ -116,25 +115,24 @@ class DetectorDLIBHOG:
     self.log = logger_utils.Logger.getLogger()
     self.detector = dlib.get_frontal_face_detector()
 
-  def detect(self, im, opt_size=None, opt_conf_thresh=None, opt_pyramids=0, opt_largest=False):
-    conf_thresh = self.conf_thresh if opt_conf_thresh is None else opt_conf_thresh
-    dnn_size = self.size if opt_size is None else opt_size
-    pyramids = self.pyramids if opt_pyramids is None else opt_pyramids
+  def detect(self, im, size=None, conf_thresh=None, pyramids=0, largest=False):
+    conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
+    dnn_size = self.size if size is None else size
+    pyramids = self.pyramids if pyramids is None else pyramids
 
-    im = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+    im = im_utils.resize(im, width=dnn_size[0], height=dnn_size[1])
     dim = im.shape[:2][::-1]
     im = im_utils.bgr2rgb(im)  # ?
 
     hog_results = self.detector.run(im, pyramids)
     bboxes = []
     if len(hog_results[0]) > 0:
-      self.log.debug(hog_results)
      for rect, score, direction in zip(*hog_results):
        if score > conf_thresh:
          bbox = BBox.from_dlib_dim(rect, dim)
          bboxes.append(bbox)
 
-    if opt_largest and len(bboxes) > 1:
+    if largest and len(bboxes) > 1:
       # only keep largest
       bboxes.sort(key=operator.attrgetter('area'), reverse=True)
       bboxes = [bboxes[0]]
@@ -157,10 +155,10 @@ class DetectorCVDNN:
     self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
     self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
 
-  def detect(self, im, opt_size=None, opt_conf_thresh=None, opt_largest=False, opt_pyramids=None):
+  def detect(self, im, size=None, conf_thresh=None, largest=False, pyramids=None):
     """Detects faces and returns (list) of (BBox)"""
-    conf_thresh = self.conf_thresh if opt_conf_thresh is None else opt_conf_thresh
-    dnn_size = self.size if opt_size is None else opt_size
+    conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
+    dnn_size = self.size if size is None else size
     im = cv.resize(im, dnn_size)
     blob = cv.dnn.blobFromImage(im, self.dnn_scale, dnn_size, self.dnn_mean)
     self.net.setInput(blob)
@@ -173,7 +171,7 @@ class DetectorCVDNN:
       rect_norm = net_outputs[0, 0, i, 3:7]
       bboxes.append(BBox(*rect_norm))
 
-    if opt_largest and len(bboxes) > 1:
+    if largest and len(bboxes) > 1:
       # only keep largest
       bboxes.sort(key=operator.attrgetter('area'), reverse=True)
       bboxes = [bboxes[0]]
```
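All four detector classes (`DetectorMTCNN`, `DetectorDLIBCNN`, `DetectorDLIBHOG`, `DetectorCVDNN`) get the same treatment: the `opt_` prefix is dropped from keyword arguments, per-call debug logging is removed from the inner loops, and the HOG detector now resizes with the resolved `dnn_size` instead of the raw `opt_size` argument, which raised a `TypeError` whenever `opt_size` was left at its `None` default. A minimal sketch of calling the renamed API, assuming the module import path implied by the diffstat and a local `face.jpg`; neither is part of the diff:

```python
import cv2 as cv
from app.processors.face_detector import DetectorDLIBCNN  # import path assumed

detector = DetectorDLIBCNN(gpu=0)   # model paths come from the project's cfg module
im = cv.imread('face.jpg')          # BGR image, as the detectors expect

# keyword arguments no longer carry the opt_ prefix
bboxes = detector.detect(im, size=(400, 400), conf_thresh=0.85, largest=True)
for bbox in bboxes:
    print(bbox)  # BBox instances; largest=True keeps only the biggest face
```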
```diff
diff --git a/megapixels/app/processors/face_pose.py b/megapixels/app/processors/face_pose.py
index 67ac685d..f2548b32 100644
--- a/megapixels/app/processors/face_pose.py
+++ b/megapixels/app/processors/face_pose.py
@@ -22,89 +22,83 @@ class FacePoseDLIB:
   def __init__(self):
     pass
 
-  def pose(self, landmarks, dim):
-    '''Calculates pose
-    '''
-    degrees = compute_pose_degrees(landmarks, dim)
-    return degrees
+  def pose(self, landmarks, dim, project_points=False):
+    # computes pose using 6 / 68 points from dlib face landmarks
+    # based on learnopencv.com and
+    # https://github.com/jerryhouuu/Face-Yaw-Roll-Pitch-from-Pose-Estimation-using-OpenCV/
+    # NB: not as accurate as MTCNN, see @jerryhouuu for ideas
+
+    pose_points_idx = (30, 8, 36, 45, 48, 54)
+    axis = np.float32([[500,0,0], [0,500,0], [0,0,500]])
+
+    # 3D model points.
+    model_points = np.array([
+      (0.0, 0.0, 0.0),          # Nose tip
+      (0.0, -330.0, -65.0),     # Chin
+      (-225.0, 170.0, -135.0),  # Left eye left corner
+      (225.0, 170.0, -135.0),   # Right eye right corne
+      (-150.0, -150.0, -125.0), # Left Mouth corner
+      (150.0, -150.0, -125.0)   # Right mouth corner
+    ])
+
+    # Assuming no lens distortion
+    dist_coeffs = np.zeros((4,1))
 
-# -----------------------------------------------------------
-# utilities
-# -----------------------------------------------------------
+    # find 6 pose points
+    pose_points = []
+    for j, idx in enumerate(pose_points_idx):
+      pt = landmarks[idx]
+      pose_points.append((pt[0], pt[1]))
+    pose_points = np.array(pose_points, dtype='double')  # convert to double
+
+    # create camera matrix
+    focal_length = dim[0]
+    center = (dim[0]/2, dim[1]/2)
+    cam_mat = np.array(
+      [[focal_length, 0, center[0]],
+       [0, focal_length, center[1]],
+       [0, 1, 1]], dtype = "double")
+
+    # solve PnP for rotation and translation
+    (success, rot_vec, tran_vec) = cv.solvePnP(model_points, pose_points,
+                                               cam_mat, dist_coeffs,
+                                               flags=cv.SOLVEPNP_ITERATIVE)
 
-def compute_pose_degrees(landmarks, dim):
-  # computes pose using 6 / 68 points from dlib face landmarks
-  # based on learnopencv.com and
-  # https://github.com/jerryhouuu/Face-Yaw-Roll-Pitch-from-Pose-Estimation-using-OpenCV/
-  # NB: not as accurate as MTCNN, see @jerryhouuu for ideas
-
-  pose_points_idx = (30, 8, 36, 45, 48, 54)
-  axis = np.float32([[500,0,0], [0,500,0], [0,0,500]])
-
-  # 3D model points.
-  model_points = np.array([
-    (0.0, 0.0, 0.0),          # Nose tip
-    (0.0, -330.0, -65.0),     # Chin
-    (-225.0, 170.0, -135.0),  # Left eye left corner
-    (225.0, 170.0, -135.0),   # Right eye right corne
-    (-150.0, -150.0, -125.0), # Left Mouth corner
-    (150.0, -150.0, -125.0)   # Right mouth corner
-  ])
-
-  # Assuming no lens distortion
-  dist_coeffs = np.zeros((4,1))
+    result = {}
 
-  # find 6 pose points
-  pose_points = []
-  for j, idx in enumerate(pose_points_idx):
-    pt = landmarks[idx]
-    pose_points.append((pt[0], pt[1]))
-  pose_points = np.array(pose_points, dtype='double')  # convert to double
-
-  # create camera matrix
-  focal_length = dim[0]
-  center = (dim[0]/2, dim[1]/2)
-  cam_mat = np.array(
-    [[focal_length, 0, center[0]],
-     [0, focal_length, center[1]],
-     [0, 1, 1]], dtype = "double")
-
-  # solve PnP for rotation and translation
-  (success, rot_vec, tran_vec) = cv.solvePnP(model_points, pose_points,
-                                             cam_mat, dist_coeffs,
-                                             flags=cv.SOLVEPNP_ITERATIVE)
+    # project points
+    if project_points:
+      pts_im, jac = cv.projectPoints(axis, rot_vec, tran_vec, cam_mat, dist_coeffs)
+      pts_model, jac2 = cv.projectPoints(model_points, rot_vec, tran_vec, cam_mat, dist_coeffs)
+      result['points_model'] = pts_model
+      result['points_image'] = pts_im
+      result['point_nose'] = tuple(landmarks[pose_points_idx[0]])
 
-  # project points
-  #pts_im, jac = cv.projectPoints(axis, rot_vec, tran_vec, cam_mat, dist_coeffs)
-  #pts_model, jac2 = cv.projectPoints(model_points, rot_vec, tran_vec, cam_mat, dist_coeffs)
-  rvec_matrix = cv.Rodrigues(rot_vec)[0]
-
-  # convert to degrees
-  proj_matrix = np.hstack((rvec_matrix, tran_vec))
-  eulerAngles = cv.decomposeProjectionMatrix(proj_matrix)[6]
-  pitch, yaw, roll = [math.radians(x) for x in eulerAngles]
-  pitch = math.degrees(math.asin(math.sin(pitch)))
-  roll = -math.degrees(math.asin(math.sin(roll)))
-  yaw = math.degrees(math.asin(math.sin(yaw)))
-  degrees = {'pitch': pitch, 'roll': roll, 'yaw': yaw}
-
-  # add nose point
-  #pt_nose = tuple(landmarks[pose_points_idx[0]])
-  return degrees
-  #return pts_im, pts_model, degrees, pt_nose
+    rvec_matrix = cv.Rodrigues(rot_vec)[0]
+
+    # convert to degrees
+    proj_matrix = np.hstack((rvec_matrix, tran_vec))
+    eulerAngles = cv.decomposeProjectionMatrix(proj_matrix)[6]
+    pitch, yaw, roll = [math.radians(x) for x in eulerAngles]
+    pitch = math.degrees(math.asin(math.sin(pitch)))
+    roll = -math.degrees(math.asin(math.sin(roll)))
+    yaw = math.degrees(math.asin(math.sin(yaw)))
+    degrees = {'pitch': pitch, 'roll': roll, 'yaw': yaw}
+    result['degrees'] = degrees
+    return result
 
-def draw_pose(im, pts_im, pts_model, pt_nose):
-  cv.line(im, pt_nose, tuple(pts_im[1].ravel()), (0,255,0), 3)  #GREEN
-  cv.line(im, pt_nose, tuple(pts_im[0].ravel()), (255,0,), 3)  #BLUE
-  cv.line(im, pt_nose, tuple(pts_im[2].ravel()), (0,0,255), 3)  #RED
-  return im
+  def draw_pose(self, im, pts_im, pts_model, pt_nose):
+    cv.line(im, pt_nose, tuple(pts_im[1].ravel()), (0,255,0), 3)  #GREEN
+    cv.line(im, pt_nose, tuple(pts_im[0].ravel()), (255,0,), 3)  #BLUE
+    cv.line(im, pt_nose, tuple(pts_im[2].ravel()), (0,0,255), 3)  #RED
 
-def draw_degrees(im, degrees, color=(0,255,0)):
-  for i, item in enumerate(degrees.items()):
-    k, v = item
-    t = '{}: {:.2f}'.format(k, v)
-    origin = (10, 30 + (25 * i))
-    cv.putText(im, t, origin, cv.FONT_HERSHEY_SIMPLEX, 0.5, color, thickness=2, lineType=2)
\ No newline at end of file
+
+  def draw_degrees(self, im, degrees, color=(0,255,0)):
+    for i, item in enumerate(degrees.items()):
+      k, v = item
+      t = '{}: {:.2f}'.format(k, v)
+      origin = (10, 30 + (25 * i))
+      cv.putText(im, t, origin, cv.FONT_HERSHEY_SIMPLEX, 0.5, color, thickness=2, lineType=2)
\ No newline at end of file
```
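The pose refactor folds the module-level `compute_pose_degrees`, `draw_pose`, and `draw_degrees` helpers into `FacePoseDLIB` as methods and returns a dict rather than bare degrees; the new `project_points` flag re-enables the previously commented-out `cv.projectPoints` calls and exposes their output under `points_image`, `points_model`, and `point_nose`. The `asin(sin(x))` round trip wraps each Euler angle into [-90°, 90°]. A sketch of the new call flow; `get_68pt_landmarks` is a hypothetical helper standing in for any dlib 68-point shape-predictor output indexable at points 30, 8, 36, 45, 48, and 54, and the import path is assumed:

```python
import cv2 as cv
from app.processors.face_pose import FacePoseDLIB  # import path assumed

im = cv.imread('face.jpg')           # hypothetical input image
landmarks = get_68pt_landmarks(im)   # assumed helper: 68 (x, y) landmark points
dim = im.shape[:2][::-1]             # (width, height)

pose = FacePoseDLIB()
result = pose.pose(landmarks, dim, project_points=True)
print(result['degrees'])             # {'pitch': ..., 'roll': ..., 'yaw': ...}

# the draw helpers are now methods and consume the projected points
pose.draw_pose(im, result['points_image'], result['points_model'], result['point_nose'])
pose.draw_degrees(im, result['degrees'])
```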
```diff
diff --git a/megapixels/app/processors/face_recognition.py b/megapixels/app/processors/face_recognition.py
index 9c3a301d..e0b9f752 100644
--- a/megapixels/app/processors/face_recognition.py
+++ b/megapixels/app/processors/face_recognition.py
@@ -17,25 +17,38 @@ class RecognitionDLIB:
   # https://github.com/davisking/dlib/blob/master/python_examples/face_recognition.py
   # facerec.compute_face_descriptor(img, shape, 100, 0.25)
 
-  def __init__(self, opt_gpu=0):
+  def __init__(self, gpu=0):
     self.log = logger_utils.Logger.getLogger()
-    if opt_gpu > 0:
+
+    if gpu > -1:
       cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', '')
-      os.environ['CUDA_VISIBLE_DEVICES'] = str(opt_gpu)
-      self.predictor = dlib.shape_predictor(cfg.DIR_MODELS_DLIB_5PT)
-      self.facerec = dlib.face_recognition_model_v1(cfg.DIR_MODELS_DLIB_FACEREC_RESNET)
+      os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
+
+    self.predictor = dlib.shape_predictor(cfg.DIR_MODELS_DLIB_5PT)
+    self.facerec = dlib.face_recognition_model_v1(cfg.DIR_MODELS_DLIB_FACEREC_RESNET)
+
+    if gpu > -1:
       os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices  # reset GPU env
 
+
   def vec(self, im, bbox, width=100,
-    jitters=cfg.DLIB_FACEREC_JITTERS, padding=cfg.DLIB_FACEREC_PADDING):
+          jitters=cfg.DLIB_FACEREC_JITTERS, padding=cfg.DLIB_FACEREC_PADDING):
     # Converts image and bbox into 128d vector
     # scale the image so the face is always 100x100 pixels
+    #self.log.debug('compute scale')
     scale = width / bbox.width
-    im = cv.resize(im, (scale, scale), interploation=cv.INTER_LANCZOS4)
+    #im = cv.resize(im, (scale, scale), cv.INTER_LANCZOS4)
+    #self.log.debug('resize')
+    cv.resize(im, None, fx=scale, fy=scale, interpolation=cv.INTER_LANCZOS4)
+    #self.log.debug('to dlib')
     bbox_dlib = bbox.to_dlib()
+    #self.log.debug('precitor')
     face_shape = self.predictor(im, bbox_dlib)
-    vec = self.facerec.compute_face_descriptor(im, face_shape, jitters, padding)
+    # vec = self.facerec.compute_face_descriptor(im, face_shape, jitters, padding)
+    #self.log.debug('vec')
+    vec = self.facerec.compute_face_descriptor(im, face_shape, jitters)
+    #vec = self.facerec.compute_face_descriptor(im, face_shape)
     return vec
```
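A few behavioral details stand out in `face_recognition.py`: the guard changes from `opt_gpu > 0` to `gpu > -1`, so GPU 0 now gets the `CUDA_VISIBLE_DEVICES` handling too while `gpu=-1` opts out entirely; the predictor and recognition models are loaded unconditionally rather than, apparently, only inside the GPU branch; and the old `cv.resize` call, which raised a `TypeError` from the misspelled `interploation` keyword, is replaced with a scale-factor resize whose return value is never assigned, so the descriptor is still computed on the unscaled image. The `padding` argument is also no longer passed to `compute_face_descriptor`. A sketch of the updated call flow, assuming the import paths and reusing a detector from the first diff:

```python
import cv2 as cv
from app.processors.face_detector import DetectorDLIBHOG     # paths assumed
from app.processors.face_recognition import RecognitionDLIB

im = cv.imread('face.jpg')
bboxes = DetectorDLIBHOG().detect(im, size=(400, 400), largest=True)

recognizer = RecognitionDLIB(gpu=0)       # gpu=-1 skips CUDA_VISIBLE_DEVICES entirely
if bboxes:
    vec = recognizer.vec(im, bboxes[0])   # 128-d dlib face descriptor
    print(len(vec))                       # 128
```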
