Diffstat (limited to 'megapixels/app/processors/face_landmarks.py')
-rw-r--r--  megapixels/app/processors/face_landmarks.py  194
1 file changed, 194 insertions(+), 0 deletions(-)
diff --git a/megapixels/app/processors/face_landmarks.py b/megapixels/app/processors/face_landmarks.py
new file mode 100644
index 00000000..8086ba1e
--- /dev/null
+++ b/megapixels/app/processors/face_landmarks.py
@@ -0,0 +1,194 @@
+from os.path import join
+from pathlib import Path
+
+import cv2 as cv
+import numpy as np
+import imutils
+
+from app.utils import im_utils, logger_utils
+from app.models.bbox import BBox
+from app.settings import app_cfg as cfg
+from app.settings import types
+
+
+# ----------------------------------------------------------------------
+#
+# 2D landmarks: 5pt and 68pt
+#
+# ----------------------------------------------------------------------
+
+class Landmarks2D:
+
+    # Abstract base class for 2D landmark estimators
+
+    def __init__(self):
+        self.log = logger_utils.Logger.getLogger()
+
+    def landmarks(self, im, bbox):
+        # override in subclasses
+        self.log.warn('Define landmarks() function')
+
+    def flatten(self, points):
+        '''Converts a list of (x, y) point-tuples into a flat dict for CSV output
+        :param points: (list) of (x, y) points
+        :returns (dict) with one entry per coordinate (eg {'x1': 100, 'y1': 200})
+        '''
+        points_formatted = {}
+        for idx, pt in enumerate(points, 1):
+            for j, d in enumerate('xy'):
+                points_formatted[f'{d}{idx}'] = pt[j]
+        return points_formatted
+
+    def normalize(self, points, dim):
+        return [np.array(p) / dim for p in points]  # divide each point by the (w, h) dim
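+
+    # Usage sketch for flatten/normalize (illustrative values only):
+    #
+    #   points = [(100, 200), (110, 210)]
+    #   self.flatten(points)                 # -> {'x1': 100, 'y1': 200, 'x2': 110, 'y2': 210}
+    #   self.normalize(points, (640, 480))   # -> [array([0.15625, 0.41666...]), ...]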
+
+
+
+import face_alignment
+
+class FaceAlignment2D_68(Landmarks2D):
+
+    # https://github.com/1adrianb/face-alignment
+    # Estimates 2D facial landmarks
+
+    def __init__(self, gpu=0, flip_input=False):
+        super().__init__()
+        t = face_alignment.LandmarksType._2D
+        device = f'cuda:{gpu}' if gpu > -1 else 'cpu'
+        self.fa = face_alignment.FaceAlignment(t, device=device, flip_input=flip_input)
+        self.log.debug(f'device: {device}')
+        self.log.debug(f'landmarks type: {t}')
+
+    def landmarks(self, im):
+        '''Calculates the 2D facial landmarks
+        :param im: (numpy.ndarray) BGR image
+        :returns (list) of 68 [x, y] (int) points, or None if no face is found
+        '''
+        # predict landmarks (one array of 68 2D points per detected face)
+        points = self.fa.get_landmarks(im)
+        if points is None:
+            return None
+        # use the first face and cast coordinates to int
+        points = [list(map(int, p)) for p in points[0]]
+        return points
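+
+    # Usage sketch (assumes a BGR image; 'face.jpg' is a hypothetical path):
+    #
+    #   im = cv.imread('face.jpg')
+    #   fa68 = FaceAlignment2D_68(gpu=-1)   # gpu=-1 selects CPU
+    #   points = fa68.landmarks(im)         # 68 [x, y] points, or None
+    #   row = fa68.flatten(points)          # CSV-ready dict: {'x1': ..., 'y1': ..., ...}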
+
+
+class Dlib2D(Landmarks2D):
+
+    def __init__(self, model):
+        super().__init__()
+        # import dlib here so it is only required when a dlib predictor is used
+        import dlib
+        self.predictor = dlib.shape_predictor(model)
+        self.log.info(f'loaded predictor model: {model}')
+
+    def landmarks(self, im, bbox):
+        '''Predicts landmarks inside a face region using the dlib shape predictor
+        :param im: (numpy.ndarray) BGR image
+        :param bbox: (BBox) face region
+        :returns (list) of [x, y] (int) points
+        '''
+        rect = bbox.to_dlib()
+        im_gray = cv.cvtColor(im, cv.COLOR_BGR2GRAY)
+        points = [[p.x, p.y] for p in self.predictor(im_gray, rect).parts()]
+        return points
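+
+    # Usage sketch (assumes an upstream face detector supplies an app.models.bbox.BBox):
+    #
+    #   dlib68 = Dlib2D_68()
+    #   points = dlib68.landmarks(im, bbox)   # bbox: BBox for the detected face
+    #   row = dlib68.flatten(points)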
+
+
+class Dlib2D_68(Dlib2D):
+
+    def __init__(self):
+        # Get 68-point landmarks using DLIB
+        super().__init__(cfg.DIR_MODELS_DLIB_68PT)
+
+
+class Dlib2D_5(Dlib2D):
+
+    def __init__(self):
+        # Get 5-point landmarks using DLIB
+        super().__init__(cfg.DIR_MODELS_DLIB_5PT)
+
+
+class MTCNN2D_5(Landmarks2D):
+
+    # Get 5-point landmarks using MTCNN
+    # https://github.com/ipazc/mtcnn
+    # pip install mtcnn
+
+    def __init__(self):
+        super().__init__()
+        self.log.warn('NB: MTCNN runs both the face detector and landmark predictor together.')
+        self.log.warn('    Only faces whose keypoints fall inside the supplied ROI are returned.')
+        from mtcnn.mtcnn import MTCNN
+        self.detector = MTCNN()
+
+    def landmarks(self, im, bbox):
+        '''Detects faces with MTCNN and returns keypoints for faces inside the ROI
+        :param im: (numpy.ndarray) image
+        :param bbox: (BBox) normalized face region used to filter detections
+        :returns (list) of MTCNN keypoint dicts, one per face contained in bbox
+        '''
+        results = []
+        dim_wh = im.shape[:2][::-1]  # (w, h)
+
+        # run MTCNN to get bounding boxes and keypoints for every detected face
+        dets = self.detector.detect_faces(im)
+        for det in dets:
+            points = det['keypoints']
+            # normalize keypoints for the contain-comparison against bbox
+            points_norm = [np.array(pt) / dim_wh for pname, pt in points.items()]
+            if all(bbox.contains(pn) for pn in points_norm):
+                results.append(points)  # append original (pixel) keypoints
+
+        return results
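+
+    # Usage sketch (assumes a normalized BBox for the target face):
+    #
+    #   mtcnn5 = MTCNN2D_5()
+    #   faces = mtcnn5.landmarks(im, bbox)    # list of keypoint dicts inside bbox
+    #   if faces:
+    #       left_eye = faces[0]['left_eye']   # keys: left_eye, right_eye, nose, mouth_left, mouth_right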
+
+
+# ----------------------------------------------------------------------
+#
+# 3D landmarks
+#
+# ----------------------------------------------------------------------
+
+class Landmarks3D:
+
+    # Abstract base class for 3D landmark estimators
+
+    def __init__(self):
+        self.log = logger_utils.Logger.getLogger()
+
+    def landmarks(self, im, bbox):
+        # override in subclasses
+        pass
+
+    def flatten(self, points):
+        '''Converts a list of (x, y, z) point-tuples into a flat dict for CSV output
+        :param points: (list) of (x, y, z) points
+        :returns (dict) with one entry per coordinate (eg {'x1': 100, 'y1': 200, 'z1': 50})
+        '''
+        points_formatted = {}
+        for idx, pt in enumerate(points, 1):
+            for j, d in enumerate('xyz'):
+                points_formatted[f'{d}{idx}'] = pt[j]
+        return points_formatted
+
+    def normalize(self, points, dim):
+        return [np.array(p) / dim for p in points]  # divide each point by the (w, h) dim
+
+
+class FaceAlignment3D_68(Landmarks3D):
+
+    # https://github.com/1adrianb/face-alignment
+    # Estimates 3D facial landmarks
+
+    def __init__(self, gpu=0, flip_input=False):
+        super().__init__()
+        device = f'cuda:{gpu}' if gpu > -1 else 'cpu'
+        t = face_alignment.LandmarksType._3D
+        self.fa = face_alignment.FaceAlignment(t, device=device, flip_input=flip_input)
+
+    def landmarks(self, im):
+        '''Calculates the 3D facial landmarks
+        :param im: (numpy.ndarray) BGR image
+        :returns (list) of 68 [x, y, z] (int) points, or None if no face is found
+        '''
+        # predict landmarks (one array of 68 3D points per detected face)
+        points = self.fa.get_landmarks(im)
+        if points is None:
+            return None
+        # use the first face and cast coordinates to int
+        points = [list(map(int, p)) for p in points[0]]
+        return points
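+
+
+if __name__ == '__main__':
+    # Minimal smoke test (sketch): the image path argument is hypothetical sample data.
+    import sys
+    im = cv.imread(sys.argv[1] if len(sys.argv) > 1 else 'face.jpg')
+    if im is None:
+        print('could not read image')
+    else:
+        fa3d = FaceAlignment3D_68(gpu=-1)
+        points = fa3d.landmarks(im)
+        print(points if points is None else fa3d.flatten(points))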