-rw-r--r--  megapixels/app/models/bbox.py  20
-rw-r--r--  megapixels/app/processors/face_detector.py  27
-rw-r--r--  megapixels/app/processors/face_landmarks.py  194
-rw-r--r--  megapixels/app/processors/face_landmarks_2d.py  87
-rw-r--r--  megapixels/app/processors/face_landmarks_3d.py  38
-rw-r--r--  megapixels/app/processors/face_pose.py  15
-rw-r--r--  megapixels/app/settings/app_cfg.py  8
-rw-r--r--  megapixels/app/settings/types.py  27
-rw-r--r--  megapixels/app/utils/display_utils.py  16
-rw-r--r--  megapixels/app/utils/draw_utils.py  65
-rw-r--r--  megapixels/commands/cv/face_landmark.py  96
-rw-r--r--  megapixels/commands/cv/face_landmark_2d_5.py (renamed from megapixels/commands/cv/face_pose_mt.py)  114
-rw-r--r--  megapixels/commands/cv/face_landmark_2d_68.py  150
-rw-r--r--  megapixels/commands/cv/face_landmark_3d_68.py  144
-rw-r--r--  megapixels/commands/cv/face_pose.py  76
-rw-r--r--  megapixels/commands/cv/face_roi.py  42
-rw-r--r--  megapixels/commands/cv/face_vector.py  10
-rw-r--r--  megapixels/commands/cv/face_vector_mt.py  118
-rw-r--r--  megapixels/commands/datasets/file_record.py  4
19 files changed, 764 insertions, 487 deletions
diff --git a/megapixels/app/models/bbox.py b/megapixels/app/models/bbox.py
index 55a92512..04ee4a70 100644
--- a/megapixels/app/models/bbox.py
+++ b/megapixels/app/models/bbox.py
@@ -29,6 +29,7 @@ class BBox:
def __init__(self, x1, y1, x2, y2):
"""Represents a bounding box and provides methods for accessing and modifying
+ All values are normalized unless otherwise specified
:param x1: normalized left coord
:param y1: normalized top coord
:param x2: normalized right coord
@@ -40,8 +41,8 @@ class BBox:
self._y2 = y2
self._width = x2 - x1
self._height = y2 - y1
- self._cx = x1 + (self._width // 2)
- self._cy = y1 + (self._height // 2)
+ self._cx = x1 + (self._width / 2)
+ self._cy = y1 + (self._height / 2)
self._tl = (x1, y1)
self._br = (x2, y2)
self._rect = (self._x1, self._y1, self._x2, self._y2)
@@ -111,7 +112,14 @@ class BBox:
# # -----------------------------------------------------------------
# # Utils
- # def constrain(self, dim):
+ def contains(self, pt_norm):
+ '''Checks if this BBox contains the normalized point
+ :param pt_norm: (int|float, int|float) normalized x, y
+ :returns (bool)
+ '''
+ x, y = pt_norm
+ return (x > self._x1 and x < self._x2 and y > self._y1 and y < self._y2)
+
def distance(self, b):
a = self
dcx = self._cx - b.cx
@@ -168,6 +176,12 @@ class BBox:
# -----------------------------------------------------------------
# Convert to
+ def to_square(self, bounds):
+ '''Forces bbox to square dimensions
+ :param bounds: (int, int) w, h of the image
+ :returns (BBox) in square ratio
+ '''
+
def to_dim(self, dim):
"""scale is (w, h) is tuple of dimensions"""
w, h = dim
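A minimal usage sketch of the new contains() check, plus one possible way the to_square() stub could be filled in; it assumes BBox exposes cx, cy, width, and height properties alongside the constructor shown above.

# sketch only: all BBox coords are normalized per the docstring above
from app.models.bbox import BBox

bbox = BBox(0.25, 0.25, 0.75, 0.75)
assert bbox.contains((0.5, 0.5))      # point inside the box
assert not bbox.contains((0.1, 0.9))  # x falls left of the box

# hypothetical to_square(): grow the shorter side around the center so the box is
# square in pixel space, then clamp back into the normalized [0, 1] range
def to_square_sketch(bbox, bounds):
    w, h = bounds                                # image width, height in pixels
    side = max(bbox.width * w, bbox.height * h)  # square side length in pixels
    hw, hh = (side / w) / 2.0, (side / h) / 2.0  # half-extents in normalized units
    return BBox(max(0.0, bbox.cx - hw), max(0.0, bbox.cy - hh),
                min(1.0, bbox.cx + hw), min(1.0, bbox.cy + hh))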
diff --git a/megapixels/app/processors/face_detector.py b/megapixels/app/processors/face_detector.py
index a805a474..6bf27576 100644
--- a/megapixels/app/processors/face_detector.py
+++ b/megapixels/app/processors/face_detector.py
@@ -65,8 +65,6 @@ class DetectorHaar:
class DetectorDLIBCNN:
-
- dnn_size = (300, 300)
pyramids = 0
conf_thresh = 0.85
@@ -79,13 +77,10 @@ class DetectorDLIBCNN:
self.detector = dlib.cnn_face_detection_model_v1(cfg.DIR_MODELS_DLIB_CNN)
os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices # reset
- def detect(self, im, size=None, conf_thresh=None, pyramids=None, largest=False, zone=None):
+ def detect(self, im, conf_thresh=None, pyramids=None, largest=False, zone=None):
bboxes = []
conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
pyramids = self.pyramids if pyramids is None else pyramids
- dnn_size = self.dnn_size if size is None else size
- # resize image
- im = im_utils.resize(im, width=dnn_size[0], height=dnn_size[1])
dim = im.shape[:2][::-1]
im = im_utils.bgr2rgb(im) # convert to RGB for dlib
# run detector
@@ -110,7 +105,6 @@ class DetectorDLIBCNN:
class DetectorDLIBHOG:
- size = (320, 240)
pyramids = 0
conf_thresh = 0.85
@@ -119,12 +113,9 @@ class DetectorDLIBHOG:
self.log = logger_utils.Logger.getLogger()
self.detector = dlib.get_frontal_face_detector()
- def detect(self, im, size=None, conf_thresh=None, pyramids=0, largest=False, zone=False):
+ def detect(self, im, conf_thresh=None, pyramids=0, largest=False, zone=False):
conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
- dnn_size = self.size if size is None else size
pyramids = self.pyramids if pyramids is None else pyramids
-
- im = im_utils.resize(im, width=dnn_size[0], height=dnn_size[1])
dim = im.shape[:2][::-1]
im = im_utils.bgr2rgb(im) # ?
hog_results = self.detector.run(im, pyramids)
@@ -153,23 +144,23 @@ class DetectorCVDNN:
dnn_scale = 1.0 # fixed
dnn_mean = (104.0, 177.0, 123.0) # fixed
dnn_crop = False # crop or force resize
- size = (300, 300)
- conf_thresh = 0.85
+ blob_size = (300, 300)
+ conf_thresh = 0.95
def __init__(self):
- import dlib
+ self.log = logger_utils.Logger.getLogger()
fp_prototxt = join(cfg.DIR_MODELS_CAFFE, 'face_detect', 'opencv_face_detector.prototxt')
fp_model = join(cfg.DIR_MODELS_CAFFE, 'face_detect', 'opencv_face_detector.caffemodel')
self.net = cv.dnn.readNet(fp_prototxt, fp_model)
self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
- def detect(self, im, size=None, conf_thresh=None, largest=False, pyramids=None, zone=False):
+ def detect(self, im, conf_thresh=None, largest=False, pyramids=None, zone=False):
"""Detects faces and returns (list) of (BBox)"""
conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
- dnn_size = self.size if size is None else size
- im = cv.resize(im, dnn_size)
- blob = cv.dnn.blobFromImage(im, self.dnn_scale, dnn_size, self.dnn_mean)
+ im = cv.resize(im, self.blob_size)
+ dim = im.shape[:2][::-1]
+ blob = cv.dnn.blobFromImage(im, self.dnn_scale, dim, self.dnn_mean)
self.net.setInput(blob)
net_outputs = self.net.forward()
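Since the size argument and the internal resize were dropped from detect(), callers now resize the frame themselves; a small sketch of the new calling convention (the input path is hypothetical), mirroring how face_roi.py uses it below.

import cv2 as cv

from app.processors import face_detector
from app.utils import im_utils

detector = face_detector.DetectorCVDNN()
im = cv.imread('face.jpg')                             # hypothetical input image
im_small = im_utils.resize(im, width=480, height=480)  # caller resizes up front
bboxes = detector.detect(im_small, conf_thresh=0.95)   # list of normalized BBox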
diff --git a/megapixels/app/processors/face_landmarks.py b/megapixels/app/processors/face_landmarks.py
new file mode 100644
index 00000000..8086ba1e
--- /dev/null
+++ b/megapixels/app/processors/face_landmarks.py
@@ -0,0 +1,194 @@
+from os.path import join
+from pathlib import Path
+
+import cv2 as cv
+import numpy as np
+import imutils
+
+from app.utils import im_utils, logger_utils
+from app.models.bbox import BBox
+from app.settings import app_cfg as cfg
+from app.settings import types
+from app.models.bbox import BBox
+
+
+# ----------------------------------------------------------------------
+#
+# 2D landmarks: 5pt and 68pt
+#
+# ----------------------------------------------------------------------
+
+class Landmarks2D:
+
+ # Abstract class
+
+ def __init__(self):
+ self.log = logger_utils.Logger.getLogger()
+
+ def landmarks(self, im, bbox):
+ # override
+ self.log.warn('Define landmarks() function')
+ pass
+
+ def flatten(self, points):
+ '''Converts a list of point tuples into a flat dict of CSV columns
+ :param points: (list) of x,y points
+ :returns (dict) with one entry per coordinate (eg {'x1':100, 'y1':200})
+ '''
+ points_formatted = {}
+ for idx, pt in enumerate(points, 1):
+ for j, d in enumerate('xy'):
+ points_formatted[f'{d}{idx}'] = pt[j]
+ return points_formatted
+
+ def normalize(self, points, dim):
+ return [np.array(p)/dim for p in points] # divides each point by w,h dim
+
+
+
+import face_alignment
+
+class FaceAlignment2D_68(Landmarks2D):
+
+ # https://github.com/1adrianb/face-alignment
+ # Estimates 2D facial landmarks
+
+ def __init__(self, gpu=0, flip_input=False):
+ t = face_alignment.LandmarksType._2D
+ device = f'cuda:{gpu}' if gpu > -1 else 'cpu'
+ self.fa = face_alignment.FaceAlignment(t, device=device, flip_input=flip_input)
+ super().__init__()
+ self.log.debug(f'{device}')
+ self.log.debug(f'{t}')
+
+ def landmarks(self, im, bbox=None):
+ '''Calculates the 2D facial landmarks
+ :param im: (numpy.ndarray) BGR image
+ :param bbox: unused, kept for interface parity with the other predictors
+ :returns (list) of 68 (x, y) int tuples
+ '''
+ # predict landmarks
+ points = self.fa.get_landmarks(im) # returns array of arrays of 68 2D pts/face
+ # convert to data type
+ points = [list(map(int, p)) for p in points[0]]
+ return points
+
+
+class Dlib2D(Landmarks2D):
+
+ def __init__(self, model):
+ super().__init__()
+ # init dlib
+ import dlib
+ self.predictor = dlib.shape_predictor(model)
+ self.log.info(f'loaded predictor model: {model}')
+
+ def landmarks(self, im, bbox):
+ # Draw high-confidence faces
+ dim_wh = im.shape[:2][::-1]
+ bbox = bbox.to_dlib()
+ im_gray = cv.cvtColor(im, cv.COLOR_BGR2GRAY)
+ points = [[p.x, p.y] for p in self.predictor(im_gray, bbox).parts()]
+ return points
+
+
+class Dlib2D_68(Dlib2D):
+
+ def __init__(self):
+ # Get 68-point landmarks using DLIB
+ super().__init__(cfg.DIR_MODELS_DLIB_68PT)
+
+
+class Dlib2D_5(Dlib2D):
+
+ def __init__(self):
+ # Get 5-point landmarks using DLIB
+ super().__init__(cfg.DIR_MODELS_DLIB_5PT)
+
+
+class MTCNN2D_5(Landmarks2D):
+
+ # Get 5-point landmarks using MTCNN
+ # https://github.com/ipazc/mtcnn
+ # pip install mtcnn
+
+ def __init__(self):
+ super().__init__()
+ self.log.warn('NB: MTCNN runs both face detector and landmark predictor together.')
+ self.log.warn(' only keypoints that fall inside the given ROI are returned')
+ from mtcnn.mtcnn import MTCNN
+ self.detector = MTCNN()
+
+ def landmarks(self, im, bbox):
+ '''Runs MTCNN detection and returns keypoints for the face inside bbox
+ :param im: (numpy.ndarray) image
+ :param bbox: (BBox) normalized face ROI used to filter detections
+ :returns (list) of MTCNN keypoint dicts whose points fall inside bbox
+ '''
+ results = []
+ dim_wh = im.shape[:2][::-1] # (w, h)
+
+ # run MTCNN to get bbox and landmarks
+ dets = self.detector.detect_faces(im)
+ keypoints = []
+ bboxes = []
+ # iterate detections and keep keypoints that fall inside the ROI
+ for det in dets:
+ #rect = det['box']
+ points = det['keypoints']
+ # convert to normalized for contain-comparison
+ points_norm = [np.array(pt)/dim_wh for pname, pt in points.items()]
+ contains = False not in [bbox.contains(pn) for pn in points_norm]
+ if contains:
+ results.append(points) # append original points
+
+ return results
+
+
+# ----------------------------------------------------------------------
+#
+# 3D landmarks
+#
+# ----------------------------------------------------------------------
+
+class Landmarks3D:
+
+ def __init__(self):
+ self.log = logger_utils.Logger.getLogger()
+
+ def landmarks(self, im, bbox):
+ pass
+
+ def flatten(self, points):
+ '''Converts a list of point tuples into a flat dict of CSV columns
+ :param points: (list) of x,y,z points
+ :returns (dict) with one entry per coordinate (eg {'x1':100, 'y1':200, 'z1':10})
+ '''
+ points_formatted = {}
+ for idx, pt in enumerate(points, 1):
+ for j, d in enumerate('xyz'):
+ points_formatted[f'{d}{idx}'] = pt[j]
+ return points_formatted
+
+ def normalize(self, points, dim):
+ return [np.array(p)/dim for p in points] # divides each point by w,h dim
+
+
+class FaceAlignment3D_68(Landmarks3D):
+
+ # Estimates 3D facial landmarks
+ import face_alignment
+
+ def __init__(self, gpu=0, flip_input=False):
+ super().__init__()
+ device = f'cuda:{gpu}' if gpu > -1 else 'cpu'
+ self.fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, device=device, flip_input=flip_input)
+
+ def landmarks(self, im, bbox=None):
+ '''Calculates the 3D facial landmarks
+ :param im: (numpy.ndarray) BGR image
+ :param bbox: unused, kept for interface parity with the other predictors
+ :returns (list) of 68 (x, y, z) int tuples
+ '''
+ # predict landmarks
+ points = self.fa.get_landmarks(im) # returns array of arrays of 68 3D pts/face
+ # convert to data type
+ points = [list(map(int, p)) for p in points[0]]
+ return points \ No newline at end of file
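A short sketch with made-up point values of how the shared normalize() and flatten() helpers turn pixel landmarks into the per-coordinate columns that the CSV commands below write out.

from app.processors.face_landmarks import Landmarks2D

helper = Landmarks2D()                        # only the shared helpers are used here
points = [(120, 80), (180, 82), (150, 120), (130, 150), (170, 152)]  # example 5-pt
dim = (300, 300)                              # (w, h) of the resized image

points_norm = helper.normalize(points, dim)   # each point divided by (w, h)
row = helper.flatten(points_norm)             # {'x1': 0.4, 'y1': 0.266..., 'x2': 0.6, ...}
# rows like this are appended to results and saved with pandas.DataFrame.from_dict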
diff --git a/megapixels/app/processors/face_landmarks_2d.py b/megapixels/app/processors/face_landmarks_2d.py
deleted file mode 100644
index e8ce93c1..00000000
--- a/megapixels/app/processors/face_landmarks_2d.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import os
-from os.path import join
-from pathlib import Path
-
-import cv2 as cv
-import numpy as np
-import imutils
-from app.utils import im_utils, logger_utils
-from app.models.bbox import BBox
-from app.settings import app_cfg as cfg
-from app.settings import types
-from app.models.bbox import BBox
-
-class LandmarksFaceAlignment:
-
- # Estimates 2D facial landmarks
- import face_alignment
-
- def __init__(self, gpu=0):
- self.log = logger_utils.Logger.getLogger()
- device = f'cuda:{gpu}' if gpu > -1 else 'cpu'
- self.fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device=device, flip_input=True)
-
- def landmarks(self, im, as_type=str):
- '''Calculates the 3D facial landmarks
- :param im: (numpy.ndarray) image
- :param as_type: (str) or (list) type to return data
- '''
- preds = self.fa.get_landmarks(im)
- # convert to comma separated ints
- # storing data as "[1,2], [3,4]" is larger file size than storing as "1,2,3,4"
- # storing a list object in Pandas seems to result in 30% larger CSV files
- # TODO optimize this
- preds_int = [list(map(int, x)) for x in preds[0]] # list of ints
- if as_type is str:
- return ','.join([','.join(list(map(str,[x,y]))) for x,y in preds_int])
- else:
- return preds_int
-
-
-class LandmarksDLIB:
-
- def __init__(self):
- # init dlib
- import dlib
- self.log = logger_utils.Logger.getLogger()
- self.predictor = dlib.shape_predictor(cfg.DIR_MODELS_DLIB_68PT)
-
- def landmarks(self, im, bbox):
- # Draw high-confidence faces
- dim = im.shape[:2][::-1]
- bbox = bbox.to_dlib()
- im_gray = cv.cvtColor(im, cv.COLOR_BGR2GRAY)
- landmarks = [[p.x, p.y] for p in self.predictor(im_gray, bbox).parts()]
- return landmarks
-
-
-class LandmarksMTCNN:
-
- # https://github.com/ipazc/mtcnn
- # pip install mtcnn
-
- dnn_size = (400, 400)
-
- def __init__(self, size=(400,400)):
- from mtcnn.mtcnn import MTCNN
- self.detector = MTCNN()
-
- def landmarks(self, im, opt_size=None, opt_conf_thresh=None, opt_pyramids=None):
- '''Detects face using MTCNN and returns (list) of BBox
- :param im: (numpy.ndarray) image
- :returns list of BBox
- '''
- rois = []
- dnn_size = self.dnn_size if opt_size is None else opt_size
- im = im_utils.resize(im, width=dnn_size[0], height=dnn_size[1])
- dim = im.shape[:2][::-1]
-
- # run MTCNN
- dets = self.detector.detect_faces(im)
-
- for det in dets:
- rect = det['box']
- keypoints = det['keypoints'] # not using here. see 'face_landmarks.py'
- bbox = BBox.from_xywh_dim(*rect, dim)
- rois.append(bbox)
- return rois \ No newline at end of file
diff --git a/megapixels/app/processors/face_landmarks_3d.py b/megapixels/app/processors/face_landmarks_3d.py
index 3663364c..470d263c 100644
--- a/megapixels/app/processors/face_landmarks_3d.py
+++ b/megapixels/app/processors/face_landmarks_3d.py
@@ -12,43 +12,24 @@ from app.models.bbox import BBox
from app.settings import app_cfg as cfg
from app.settings import types
+class Landmarks3D:
-class FaceLandmarks2D:
-
- # Estimates 2D facial landmarks
- import face_alignment
-
- def __init__(self, gpu=0):
+ def __init__(self):
self.log = logger_utils.Logger.getLogger()
- device = f'cuda:{gpu}' if gpu > -1 else 'cpu'
- self.fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device=device, flip_input=True)
-
- def landmarks(self, im, as_type=str):
- '''Calculates the 3D facial landmarks
- :param im: (numpy.ndarray) image
- :param as_type: (str) or (list) type to return data
- '''
- preds = self.fa.get_landmarks(im)
- # convert to comma separated ints
- # storing data as "[1,2], [3,4]" is larger file size than storing as "1,2,3,4"
- # storing a list object in Pandas seems to result in 30% larger CSV files
- # TODO optimize this
- preds_int = [list(map(int, x)) for x in preds[0]] # list of ints
- if as_type is str:
- return ','.join([','.join(list(map(str,[x,y]))) for x,y in preds_int])
- else
- return preds_int
+ def landmarks(self, im, bbox):
+ pass
-class FaceLandmarks3D:
+
+class FaceAlignment3D(Landmarks3D):
# Estimates 3D facial landmarks
import face_alignment
- def __init__(self, gpu=0):
- self.log = logger_utils.Logger.getLogger()
+ def __init__(self, gpu=0, flip_input=False):
+ super().__init__()
device = f'cuda:{gpu}' if gpu > -1 else 'cpu'
- self.fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, device=device, flip_input=False)
+ self.fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, device=device, flip_input=flip_input)
def landmarks(self, im, as_type=str):
'''Calculates the 3D facial landmarks
@@ -66,6 +47,7 @@ class FaceLandmarks3D:
else
return preds_int
+
def draw(self, im):
'''draws landmarks in 3d scene'''
diff --git a/megapixels/app/processors/face_pose.py b/megapixels/app/processors/face_pose.py
index 96281637..8bc95f8d 100644
--- a/megapixels/app/processors/face_pose.py
+++ b/megapixels/app/processors/face_pose.py
@@ -95,18 +95,3 @@ class FacePoseDLIB:
result['yaw'] = yaw
return result
-
-
- def draw_pose(self, im, pt_nose, image_pts):
- cv.line(im, pt_nose, tuple(image_pts['pitch'].ravel()), self.pose_types['pitch'], 3)
- cv.line(im, pt_nose, tuple(image_pts['yaw'].ravel()), self.pose_types['yaw'], 3)
- cv.line(im, pt_nose, tuple(image_pts['roll'].ravel()), self.pose_types['roll'], 3)
-
-
- def draw_degrees(self, im, pose_data, color=(0,255,0)):
- for i, pose_type in enumerate(self.pose_types.items()):
- k, clr = pose_type
- v = pose_data[k]
- t = '{}: {:.2f}'.format(k, v)
- origin = (10, 30 + (25 * i))
- cv.putText(im, t, origin, cv.FONT_HERSHEY_SIMPLEX, 0.5, clr, thickness=2, lineType=2) \ No newline at end of file
diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py
index 55fed166..b13ff8ec 100644
--- a/megapixels/app/settings/app_cfg.py
+++ b/megapixels/app/settings/app_cfg.py
@@ -14,12 +14,16 @@ codecs.register(lambda name: codecs.lookup('utf8') if name == 'utf8mb4' else Non
# Enun lists used for custom Click Params
# -----------------------------------------------------------------------------
-FaceDetectNetVar = click_utils.ParamVar(types.FaceDetectNet)
-HaarCascadeVar = click_utils.ParamVar(types.HaarCascade)
LogLevelVar = click_utils.ParamVar(types.LogLevel)
MetadataVar = click_utils.ParamVar(types.Metadata)
DatasetVar = click_utils.ParamVar(types.Dataset)
DataStoreVar = click_utils.ParamVar(types.DataStore)
+# Face analysis
+HaarCascadeVar = click_utils.ParamVar(types.HaarCascade)
+FaceDetectNetVar = click_utils.ParamVar(types.FaceDetectNet)
+FaceLandmark2D_5Var = click_utils.ParamVar(types.FaceLandmark2D_5)
+FaceLandmark2D_68Var = click_utils.ParamVar(types.FaceLandmark2D_68)
+FaceLandmark3D_68Var = click_utils.ParamVar(types.FaceLandmark3D_68)
# # data_store
DATA_STORE = '/data_store_hdd/'
diff --git a/megapixels/app/settings/types.py b/megapixels/app/settings/types.py
index c2e2caf7..50e395e0 100644
--- a/megapixels/app/settings/types.py
+++ b/megapixels/app/settings/types.py
@@ -6,10 +6,7 @@ def find_type(name, enum_type):
return enum_opt
return None
-
-class FaceDetectNet(Enum):
- """Scene text detector networks"""
- HAAR, DLIB_CNN, DLIB_HOG, CVDNN, MTCNN = range(5)
+
class CVBackend(Enum):
"""OpenCV 3.4.2+ DNN target type"""
@@ -45,16 +42,32 @@ class LogLevel(Enum):
# --------------------------------------------------------------------
class Metadata(Enum):
- IDENTITY, FILE_RECORD, FACE_VECTOR, FACE_POSE, FACE_ROI, FACE_LANDMARKS_2D_68, \
- FACE_LANDMARKS_3D_68 = range(7)
+ IDENTITY, FILE_RECORD, FACE_VECTOR, FACE_POSE, \
+ FACE_ROI, FACE_LANDMARK_2D_68, FACE_LANDMARK_2D_5,FACE_LANDMARK_3D_68 = range(8)
class Dataset(Enum):
- LFW, VGG_FACE2, MSCELEB, UCCS, UMD_FACES = range(5)
+ LFW, VGG_FACE2, MSCELEB, UCCS, UMD_FACES, SCUT_FBP, SELFIE_DATASET = range(7)
# ---------------------------------------------------------------------
# Face analysis types
# --------------------------------------------------------------------
+class FaceDetectNet(Enum):
+ """Scene text detector networks"""
+ HAAR, DLIB_CNN, DLIB_HOG, CVDNN, MTCNN = range(5)
+
+class FaceLandmark2D_5(Enum):
+ DLIB, MTCNN = range(2)
+
+class FaceLandmark2D_68(Enum):
+ DLIB, FACE_ALIGNMENT = range(2)
+
+class FaceLandmark3D_68(Enum):
+ FACE_ALIGNMENT, = range(1)
+
+class FaceLandmark3D(Enum):
+ FACE_ALIGNMENT, = range(1)
+
class FaceEmotion(Enum):
# Map these to text strings for web display
NEUTRAL, HAPPY, SAD, ANGRY, FRUSTURATED = range(5)
diff --git a/megapixels/app/utils/display_utils.py b/megapixels/app/utils/display_utils.py
new file mode 100644
index 00000000..58e2feec
--- /dev/null
+++ b/megapixels/app/utils/display_utils.py
@@ -0,0 +1,16 @@
+import sys
+
+import cv2 as cv
+
+
+def handle_keyboard():
+ '''Used with cv.imshow('title', image) to wait for keyboard press
+ '''
+ while True:
+ k = cv.waitKey(1) & 0xFF
+ if k == 27 or k == ord('q'): # ESC
+ cv.destroyAllWindows()
+ sys.exit()
+ elif k != 255:
+ # any key to continue
+ break \ No newline at end of file
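Intended usage, as in the commands below: show a frame, then block on a key press; ESC or q exits the script and any other key continues.

import cv2 as cv
import numpy as np

from app.utils import display_utils

im = np.zeros((300, 300, 3), dtype=np.uint8)  # placeholder frame for the sketch
cv.imshow('', im)
display_utils.handle_keyboard()               # any key continues, ESC or q exits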
diff --git a/megapixels/app/utils/draw_utils.py b/megapixels/app/utils/draw_utils.py
new file mode 100644
index 00000000..f6d53609
--- /dev/null
+++ b/megapixels/app/utils/draw_utils.py
@@ -0,0 +1,65 @@
+import sys
+
+import cv2 as cv
+
+
+# ---------------------------------------------------------------------------
+#
+# OpenCV drawing functions
+#
+# ---------------------------------------------------------------------------
+
+pose_types = {'pitch': (0,0,255), 'roll': (255,0,0), 'yaw': (0,255,0)}
+
+
+def draw_landmarks2D(im, points, radius=3, color=(0,255,0), stroke_weight=2):
+ '''Draws facial landmarks, either 5pt or 68pt
+ '''
+ for x,y in points:
+ cv.circle(im, (x,y), radius, color, -1, cv.LINE_AA)
+
+
+def draw_landmarks3D(im, points, radius=3, color=(0,255,0), stroke_weight=2):
+ '''Draws 3D facial landmarks
+ '''
+ for x,y,z in points:
+ cv.circle(im, (x,y), radius, color, -1, cv.LINE_AA)
+
+
+def draw_bbox(im, bbox, color=(0,255,0), stroke_weight=2):
+ '''Draws a dimensioned (not-normalized) BBox onto cv2 image
+ '''
+ cv.rectangle(im, bbox.pt_tl, bbox.pt_br, color, stroke_weight)
+
+
+def draw_pose(im, pt_nose, image_pts):
+ '''Draws 3-axis pose over image
+ '''
+ cv.line(im, pt_nose, tuple(image_pts['pitch'].ravel()), pose_types['pitch'], 3)
+ cv.line(im, pt_nose, tuple(image_pts['yaw'].ravel()), pose_types['yaw'], 3)
+ cv.line(im, pt_nose, tuple(image_pts['roll'].ravel()), pose_types['roll'], 3)
+
+
+def draw_degrees(im, pose_data, color=(0,255,0)):
+ '''Draws degrees as text over image
+ '''
+ for i, pose_type in enumerate(pose_types.items()):
+ k, clr = pose_type
+ v = pose_data[k]
+ t = '{}: {:.2f}'.format(k, v)
+ origin = (10, 30 + (25 * i))
+ cv.putText(im, t, origin, cv.FONT_HERSHEY_SIMPLEX, 0.5, clr, thickness=2, lineType=2)
+
+
+# ---------------------------------------------------------------------------
+#
+# Matplotlib drawing functions
+#
+# ---------------------------------------------------------------------------
+
+def plot_landmarks3D(im, points, radius=3, color=(0,255,0), stroke_weight=2):
+ '''Plots 3D facial landmarks (placeholder: draws only the x,y projection)
+ '''
+ for x, y, z in points:
+ cv.circle(im, (x, y), radius, color, -1, cv.LINE_AA)
+
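draw_bbox() expects pixel ("dimensioned") coordinates, so a normalized BBox is converted with to_dim() first; a small sketch with placeholder values.

import cv2 as cv
import numpy as np

from app.models.bbox import BBox
from app.utils import draw_utils

im = np.zeros((300, 300, 3), dtype=np.uint8)    # placeholder frame
dim = im.shape[:2][::-1]                        # (w, h)
bbox_px = BBox(0.2, 0.2, 0.8, 0.8).to_dim(dim)  # normalized -> pixel BBox
draw_utils.draw_bbox(im, bbox_px)
draw_utils.draw_landmarks2D(im, [(120, 130), (180, 130), (150, 180)])  # pixel points
cv.imshow('', im)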
diff --git a/megapixels/commands/cv/face_landmark.py b/megapixels/commands/cv/face_landmark.py
deleted file mode 100644
index 03ef8fc2..00000000
--- a/megapixels/commands/cv/face_landmark.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-
-"""
-
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-
-color_filters = {'color': 1, 'gray': 2, 'all': 3}
-
-@click.command()
-@click.option('-i', '--input', 'opt_dirs_in', required=True, multiple=True,
- help='Input directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output CSV')
-@click.option('-e', '--ext', 'opt_ext',
- default='jpg', type=click.Choice(['jpg', 'png']),
- help='File glob ext')
-@click.option('--size', 'opt_size',
- type=(int, int), default=(300, 300),
- help='Output image size')
-@click.option('-g', '--gpu', 'opt_gpu', default=0,
- help='GPU index')
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
- help='Slice list of files')
-@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
- help='Use glob recursion (slower)')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.pass_context
-def cli(ctx, opt_dirs_in, opt_fp_out, opt_ext, opt_size, opt_gpu, opt_slice,
- opt_recursive, opt_force):
- """Converts face imges to 3D landmarks"""
-
- import sys
- import os
- from os.path import join
- from pathlib import Path
- from glob import glob
-
- from tqdm import tqdm
- import numpy as np
- import dlib # must keep a local reference for dlib
- import cv2 as cv
- import pandas as pd
- from face_alignment import FaceAlignment, LandmarksType
- from skimage import io
-
- from app.utils import logger_utils, file_utils
- from app.processors import face_detector
-
- # -------------------------------------------------
- # init here
-
-
- log = logger_utils.Logger.getLogger()
-
- if not opt_force and Path(opt_fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
-
- device = 'cuda' if opt_gpu > -1 else 'cpu'
- fa = FaceAlignment(LandmarksType._3D, flip_input=False, device=device)
-
- # get list of files to process
- fp_ims = []
- for opt_dir_in in opt_dirs_in:
- if opt_recursive:
- fp_glob = join(opt_dir_in, '**/*.{}'.format(opt_ext))
- fp_ims += glob(fp_glob, recursive=True)
- else:
- fp_glob = join(opt_dir_in, '*.{}'.format(opt_ext))
- fp_ims += glob(fp_glob)
- log.debug(fp_glob)
-
-
- if opt_slice:
- fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
- log.debug('processing {:,} files'.format(len(fp_ims)))
-
-
- data = {}
-
- for fp_im in tqdm(fp_ims):
- fpp_im = Path(fp_im)
- im = io.imread(fp_im)
- preds = fa.get_landmarks(im)
- if preds and len(preds) > 0:
- data[fpp_im.name] = preds[0].tolist()
-
- # save date
- file_utils.mkdirs(opt_fp_out)
-
- file_utils.write_json(data, opt_fp_out, verbose=True) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_pose_mt.py b/megapixels/commands/cv/face_landmark_2d_5.py
index 8fef2c2c..40ec6f41 100644
--- a/megapixels/commands/cv/face_pose_mt.py
+++ b/megapixels/commands/cv/face_landmark_2d_5.py
@@ -1,5 +1,5 @@
"""
-Converts ROIs to pose: yaw, roll, pitch
+Converts face ROIs to 2D 5-point facial landmarks
"""
import click
@@ -8,6 +8,8 @@ from app.settings import types
from app.utils import click_utils
from app.settings import app_cfg as cfg
+color_filters = {'color': 1, 'gray': 2, 'all': 3}
+
@click.command()
@click.option('-i', '--input', 'opt_fp_in', default=None,
help='Override enum input filename CSV')
@@ -15,9 +17,9 @@ from app.settings import app_cfg as cfg
help='Override enum output filename CSV')
@click.option('-m', '--media', 'opt_dir_media', default=None,
help='Override enum media directory')
-@click.option('--data_store', 'opt_data_store',
+@click.option('--store', 'opt_data_store',
type=cfg.DataStoreVar,
- default=click_utils.get_default(types.DataStore.SSD),
+ default=click_utils.get_default(types.DataStore.HDD),
show_default=True,
help=click_utils.show_help(types.Dataset))
@click.option('--dataset', 'opt_dataset',
@@ -25,6 +27,10 @@ from app.settings import app_cfg as cfg
required=True,
show_default=True,
help=click_utils.show_help(types.Dataset))
+@click.option('-d', '--detector', 'opt_detector_type',
+ type=cfg.FaceLandmark2D_5Var,
+ default=click_utils.get_default(types.FaceLandmark2D_5.DLIB),
+ help=click_utils.show_help(types.FaceLandmark2D_5))
@click.option('--size', 'opt_size',
type=(int, int), default=(300, 300),
help='Output image size')
@@ -35,9 +41,9 @@ from app.settings import app_cfg as cfg
@click.option('-d', '--display', 'opt_display', is_flag=True,
help='Display image for debugging')
@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
- opt_slice, opt_force, opt_display):
- """Converts ROIs to pose: roll, yaw, pitch"""
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type,
+ opt_size, opt_slice, opt_force, opt_display):
+ """Creates 2D 5-point landmarks"""
import sys
import os
@@ -47,33 +53,39 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
from tqdm import tqdm
import numpy as np
- import dlib # must keep a local reference for dlib
import cv2 as cv
import pandas as pd
- from app.models.bbox import BBox
- from app.utils import logger_utils, file_utils, im_utils
- from app.processors.face_landmarks import LandmarksDLIB
- from app.processors.face_pose import FacePoseDLIB
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors import face_landmarks
from app.models.data_store import DataStore
+ from app.models.bbox import BBox
# -------------------------------------------------
# init here
log = logger_utils.Logger.getLogger()
-
- # set data_store
+ # init filepaths
data_store = DataStore(opt_data_store, opt_dataset)
-
- # get filepath out
- fp_out = data_store.metadata(types.Metadata.FACE_POSE) if opt_fp_out is None else opt_fp_out
+ # set file output path
+ metadata_type = types.Metadata.FACE_LANDMARK_2D_5
+ fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out
if not opt_force and Path(fp_out).exists():
log.error('File exists. Use "-f / --force" to overwite')
return
- # init face processors
- face_pose = FacePoseDLIB()
- face_landmarks = LandmarksDLIB()
+ # init face landmark processors
+ if opt_detector_type == types.FaceLandmark2D_5.DLIB:
+ # use dlib 5 point predictor
+ landmark_detector = face_landmarks.Dlib2D_5()
+ elif opt_detector_type == types.FaceLandmark2D_5.MTCNN:
+ # use MTCNN 5 point predictor
+ landmark_detector = face_landmarks.MTCNN2D_5()
+ else:
+ log.error('{} not yet implemented'.format(opt_detector_type.name))
+ return
+
+ log.info(f'Using landmark detector: {opt_detector_type.name}')
# load filepath data
fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
@@ -88,51 +100,47 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
df_img_groups = df_roi.groupby('record_index')
log.debug('processing {:,} groups'.format(len(df_img_groups)))
- # store poses and convert to DataFrame
- poses = []
+ # store landmarks in list
+ results = []
- # iterate
+ # iterate groups with file/record index as key
for record_index, df_img_group in tqdm(df_img_groups):
- # make fp
+
+ # access file record
ds_record = df_record.iloc[record_index]
+
+ # load image
fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
im = cv.imread(fp_im)
- for roi_id, df_img in df_img_group.iterrows():
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+
+ # iterate image group dataframe with roi index as key
+ for roi_index, df_img in df_img_group.iterrows():
+
# get bbox
x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
- dim = im.shape[:2][::-1]
+ dim = im_resized.shape[:2][::-1]
bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
- # get pose
- landmarks = face_landmarks.landmarks(im, bbox)
- pose_data = face_pose.pose(landmarks, dim, project_points=opt_display)
- pose_degrees = pose_data['degrees'] # only keep the degrees data
- # use the project point data if display flag set
- if opt_display:
- pts_im = pose_data['points_image']
- pts_model = pose_data['points_model']
- pt_nose = pose_data['point_nose']
- dst = im.copy()
- face_pose.draw_pose(dst, pts_im, pts_model, pt_nose)
- face_pose.draw_degrees(dst, pose_degrees)
- # display to cv window
- cv.imshow('', dst)
- while True:
- k = cv.waitKey(1) & 0xFF
- if k == 27 or k == ord('q'): # ESC
- cv.destroyAllWindows()
- sys.exit()
- elif k != 255:
- # any key to continue
- break
+ # get landmark points
+ points = landmark_detector.landmarks(im_resized, bbox)
+ points_norm = landmark_detector.normalize(points, dim)
+ points_flat = landmark_detector.flatten(points_norm)
- # add image index and append to result CSV data
- pose_degrees['record_index'] = record_index
- poses.append(pose_degrees)
+ # display to screen if optioned
+ if opt_display:
+ draw_utils.draw_landmarks2D(im_resized, points)
+ draw_utils.draw_bbox(im_resized, bbox)
+ cv.imshow('', im_resized)
+ display_utils.handle_keyboard()
+ results.append(points_flat)
- # save date
+ # create DataFrame and save to CSV
file_utils.mkdirs(fp_out)
- df = pd.DataFrame.from_dict(poses)
+ df = pd.DataFrame.from_dict(results)
df.index.name = 'index'
- df.to_csv(fp_out) \ No newline at end of file
+ df.to_csv(fp_out)
+
+ # save script
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_landmark_2d_68.py b/megapixels/commands/cv/face_landmark_2d_68.py
new file mode 100644
index 00000000..e24d4b60
--- /dev/null
+++ b/megapixels/commands/cv/face_landmark_2d_68.py
@@ -0,0 +1,150 @@
+"""
+Converts face ROIs to 2D 68-point facial landmarks
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-d', '--detector', 'opt_detector_type',
+ type=cfg.FaceLandmark2D_68Var,
+ default=click_utils.get_default(types.FaceLandmark2D_68.DLIB),
+ help=click_utils.show_help(types.FaceLandmark2D_68))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('-d', '--display', 'opt_display', is_flag=True,
+ help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type,
+ opt_size, opt_slice, opt_force, opt_display):
+ """Creates 2D 68-point landmarks"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors import face_landmarks
+ from app.models.data_store import DataStore
+ from app.models.bbox import BBox
+
+ # -------------------------------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+ # init filepaths
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # set file output path
+ metadata_type = types.Metadata.FACE_LANDMARK_2D_68
+ fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # init face landmark processors
+ if opt_detector_type == types.FaceLandmark2D_68.DLIB:
+ # use dlib 68 point detector
+ landmark_detector = face_landmarks.Dlib2D_68()
+ elif opt_detector_type == types.FaceLandmark2D_68.FACE_ALIGNMENT:
+ # use face_alignment 68 point predictor
+ landmark_detector = face_landmarks.FaceAlignment2D_68()
+ else:
+ log.error('{} not yet implemented'.format(opt_detector_type.name))
+ return
+
+ log.info(f'Using landmark detector: {opt_detector_type.name}')
+
+ # -------------------------------------------------------------------------
+ # load filepath data
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+ # load ROI data
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # slice if you want
+ if opt_slice:
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+ # group by image index (speedup if multiple faces per image)
+ df_img_groups = df_roi.groupby('record_index')
+ log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+ # store landmarks in list
+ results = []
+
+ # -------------------------------------------------------------------------
+ # iterate groups with file/record index as key
+
+ for record_index, df_img_group in tqdm(df_img_groups):
+
+ # access file_record DataSeries
+ file_record = df_record.iloc[record_index]
+
+ # load image
+ fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext)
+ im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ dim = im_resized.shape[:2][::-1]
+
+ # iterate ROIs in this image
+ for roi_index, df_img in df_img_group.iterrows():
+
+ # find landmarks
+ x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h # normalized values
+ #dim = (file_record.width, file_record.height) # original w,h
+ bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+ points = landmark_detector.landmarks(im_resized, bbox)
+ points_norm = landmark_detector.normalize(points, dim)
+ points_flat = landmark_detector.flatten(points_norm)
+
+ # display if optioned
+ if opt_display:
+ dst = im_resized.copy()
+ draw_utils.draw_landmarks2D(dst, points)
+ draw_utils.draw_bbox(dst, bbox)
+ cv.imshow('', dst)
+ display_utils.handle_keyboard()
+
+ # add to results for CSV
+ results.append(points_flat)
+
+
+ # create DataFrame and save to CSV
+ file_utils.mkdirs(fp_out)
+ df = pd.DataFrame.from_dict(results)
+ df.index.name = 'index'
+ df.to_csv(fp_out)
+
+ # save script
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_landmark_3d_68.py b/megapixels/commands/cv/face_landmark_3d_68.py
new file mode 100644
index 00000000..56e60cda
--- /dev/null
+++ b/megapixels/commands/cv/face_landmark_3d_68.py
@@ -0,0 +1,144 @@
+"""
+Converts face ROIs to 3D 68-point facial landmarks
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+color_filters = {'color': 1, 'gray': 2, 'all': 3}
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-d', '--detector', 'opt_detector_type',
+ type=cfg.FaceLandmark3D_68Var,
+ default=click_utils.get_default(types.FaceLandmark3D_68.FACE_ALIGNMENT),
+ help=click_utils.show_help(types.FaceLandmark3D_68))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('-d', '--display', 'opt_display', is_flag=True,
+ help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type,
+ opt_size, opt_slice, opt_force, opt_display):
+ """Generate 3D 68-point landmarks"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors import face_landmarks
+ from app.models.data_store import DataStore
+ from app.models.bbox import BBox
+
+ # --------------------------------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+ log.warn('3D landmark points are normalized in a (200, 200, 200) XYZ space')
+ # init filepaths
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # set file output path
+ metadata_type = types.Metadata.FACE_LANDMARK_3D_68
+ fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # init face landmark processors
+ if opt_detector_type == types.FaceLandmark3D_68.FACE_ALIGNMENT:
+ # use FaceAlignment 68 point 3D detector
+ landmark_detector = face_landmarks.FaceAlignment3D_68()
+ else:
+ log.error('{} not yet implemented'.format(opt_detector_type.name))
+ return
+
+ log.info(f'Using landmark detector: {opt_detector_type.name}')
+
+ # -------------------------------------------------------------------------
+ # load data
+
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD) # file_record.csv
+ df_record = pd.read_csv(fp_record).set_index('index')
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI) # face_roi.csv
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ if opt_slice:
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]] # slice if you want
+ df_img_groups = df_roi.groupby('record_index') # groups by image index (load once)
+ log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+ # store landmarks in list
+ results = []
+
+ # iterate groups with file/record index as key
+ for record_index, df_img_group in tqdm(df_img_groups):
+
+ # access file record
+ ds_record = df_record.iloc[record_index]
+
+ # load image
+ fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+
+ # iterate image group dataframe with roi index as key
+ for roi_index, df_img in df_img_group.iterrows():
+
+ # get bbox
+ x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
+ dim = im_resized.shape[:2][::-1]
+ bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+
+ # get landmark points
+ points = landmark_detector.landmarks(im_resized, bbox)
+ # NB: these points are not normalized against the image; face_alignment
+ # already scales them within its (200, 200, 200) XYZ space
+ points_flattened = landmark_detector.flatten(points)
+
+ # display to screen if optioned
+ if opt_display:
+ draw_utils.draw_landmarks2D(im_resized, points)
+ draw_utils.draw_bbox(im_resized, bbox)
+ cv.imshow('', im_resized)
+ display_utils.handle_keyboard()
+
+ results.append(points_flattened)
+
+ # create DataFrame and save to CSV
+ file_utils.mkdirs(fp_out)
+ df = pd.DataFrame.from_dict(results)
+ df.index.name = 'index'
+ df.to_csv(fp_out)
+
+ # save script
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_pose.py b/megapixels/commands/cv/face_pose.py
index 4e35210c..70ea1f30 100644
--- a/megapixels/commands/cv/face_pose.py
+++ b/megapixels/commands/cv/face_pose.py
@@ -1,4 +1,6 @@
"""
+NB: This only works with the DLIB 68-point landmarks.
+
Converts ROIs to pose: yaw, roll, pitch
pitch: looking down or up in yes gesture
roll: tilting head towards shoulder
@@ -6,6 +8,13 @@ yaw: twisting head left to right in no gesture
"""
+"""
+TODO
+- check compatibility with MTCNN 68 point detector
+- improve accuracy by using MTCNN 5-point
+- refer to https://github.com/jerryhouuu/Face-Yaw-Roll-Pitch-from-Pose-Estimation-using-OpenCV/
+"""
+
import click
from app.settings import types
@@ -19,7 +28,7 @@ from app.settings import app_cfg as cfg
help='Override enum output filename CSV')
@click.option('-m', '--media', 'opt_dir_media', default=None,
help='Override enum media directory')
-@click.option('--data_store', 'opt_data_store',
+@click.option('--store', 'opt_data_store',
type=cfg.DataStoreVar,
default=click_utils.get_default(types.DataStore.HDD),
show_default=True,
@@ -56,8 +65,8 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
import pandas as pd
from app.models.bbox import BBox
- from app.utils import logger_utils, file_utils, im_utils
- from app.processors.face_landmarks_2d import LandmarksDLIB
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors.face_landmarks import Dlib2D_68
from app.processors.face_pose import FacePoseDLIB
from app.models.data_store import DataStore
@@ -77,9 +86,11 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
# init face processors
face_pose = FacePoseDLIB()
- face_landmarks = LandmarksDLIB()
+ face_landmarks = Dlib2D_68()
+
+ # -------------------------------------------------
+ # load data
- # load filepath data
fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
df_record = pd.read_csv(fp_record).set_index('index')
# load ROI data
@@ -93,59 +104,60 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
log.debug('processing {:,} groups'.format(len(df_img_groups)))
# store poses and convert to DataFrame
- poses = []
+ results = []
+ # -------------------------------------------------
# iterate groups with file/record index as key
for record_index, df_img_group in tqdm(df_img_groups):
- # make fp
- ds_record = df_record.iloc[record_index]
- fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+
+ # access the file_record
+ file_record = df_record.iloc[record_index] # pandas.Series
+
+ # load image
+ fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext)
im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+
# iterate image group dataframe with roi index as key
for roi_index, df_img in df_img_group.iterrows():
+
# get bbox
x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
- dim = (ds_record.width, ds_record.height)
- #dim = im.shape[:2][::-1]
+ #dim = (file_record.width, file_record.height)
+ dim = im_resized.shape[:2][::-1]
bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+
# get pose
- landmarks = face_landmarks.landmarks(im, bbox)
+ landmarks = face_landmarks.landmarks(im_resized, bbox)
pose_data = face_pose.pose(landmarks, dim)
#pose_degrees = pose_data['degrees'] # only keep the degrees data
#pose_degrees['points_nose'] = pose_data
- # use the project point data if display flag set
+
+ # draw landmarks if optioned
if opt_display:
- dst = im.copy()
- face_pose.draw_pose(dst, pose_data['point_nose'], pose_data['points'])
- face_pose.draw_degrees(dst, pose_data)
- # display to cv window
- cv.imshow('', dst)
- while True:
- k = cv.waitKey(1) & 0xFF
- if k == 27 or k == ord('q'): # ESC
- cv.destroyAllWindows()
- sys.exit()
- elif k != 255:
- # any key to continue
- break
+ draw_utils.draw_pose(im_resized, pose_data['point_nose'], pose_data['points'])
+ draw_utils.draw_degrees(im_resized, pose_data)
+ cv.imshow('', im_resized)
+ display_utils.handle_keyboard()
# add image index and append to result CSV data
pose_data['roi_index'] = roi_index
for k, v in pose_data['points'].items():
pose_data[f'point_{k}_x'] = v[0][0] / dim[0]
pose_data[f'point_{k}_y'] = v[0][1] / dim[1]
+
+ # rearrange data structure for DataFrame
pose_data.pop('points')
pose_data['point_nose_x'] = pose_data['point_nose'][0] / dim[0]
pose_data['point_nose_y'] = pose_data['point_nose'][1] / dim[1]
pose_data.pop('point_nose')
- poses.append(pose_data)
+ results.append(pose_data)
- # create dataframe
+ # create DataFrame and save to CSV
file_utils.mkdirs(fp_out)
- df = pd.DataFrame.from_dict(poses)
- # save date
+ df = pd.DataFrame.from_dict(results)
df.index.name = 'index'
df.to_csv(fp_out)
+
# save script
- cmd_line = ' '.join(sys.argv)
- file_utils.write_text(cmd_line, '{}.sh'.format(fp_out)) \ No newline at end of file
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_roi.py b/megapixels/commands/cv/face_roi.py
index c3c2ac05..6d42924e 100644
--- a/megapixels/commands/cv/face_roi.py
+++ b/megapixels/commands/cv/face_roi.py
@@ -29,7 +29,7 @@ color_filters = {'color': 1, 'gray': 2, 'all': 3}
show_default=True,
help=click_utils.show_help(types.Dataset))
@click.option('--size', 'opt_size',
- type=(int, int), default=(300, 300),
+ type=(int, int), default=(480, 480),
help='Output image size')
@click.option('-d', '--detector', 'opt_detector_type',
type=cfg.FaceDetectNetVar,
@@ -50,7 +50,7 @@ color_filters = {'color': 1, 'gray': 2, 'all': 3}
@click.option('--color', 'opt_color_filter',
type=click.Choice(color_filters.keys()), default='all',
help='Filter to keep color or grayscale images (color = keep color')
-@click.option('--largest/--all-faces', 'opt_largest', is_flag=True, default=True,
+@click.option('--keep', 'opt_largest', type=click.Choice(['largest', 'all']), default='all',
help='Only keep largest face')
@click.option('--zone', 'opt_zone', default=(0.0, 0.0), type=(float, float),
help='Face center must be located within zone region (0.5 = half width/height)')
@@ -72,7 +72,7 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
import cv2 as cv
import pandas as pd
- from app.utils import logger_utils, file_utils, im_utils
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
from app.processors import face_detector
from app.models.data_store import DataStore
@@ -113,13 +113,15 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
# filter out grayscale
color_filter = color_filters[opt_color_filter]
+ # set largest flag, to keep all or only largest
+ opt_largest = opt_largest == 'largest'
data = []
for df_record in tqdm(df_records.itertuples(), total=len(df_records)):
fp_im = data_store.face(str(df_record.subdir), str(df_record.fn), str(df_record.ext))
im = cv.imread(fp_im)
-
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
# filter out color or grayscale iamges
if color_filter != color_filters['all']:
try:
@@ -130,9 +132,10 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
except Exception as e:
log.error('Could not check grayscale: {}'.format(fp_im))
continue
-
+
try:
- bboxes = detector.detect(im, size=opt_size, pyramids=opt_pyramids, largest=opt_largest, zone=opt_zone)
+ bboxes = detector.detect(im_resized, pyramids=opt_pyramids, largest=opt_largest,
+ zone=opt_zone, conf_thresh=opt_conf_thresh)
except Exception as e:
log.error('could not detect: {}'.format(fp_im))
log.error('{}'.format(e))
@@ -150,27 +153,22 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
if len(bboxes) == 0:
log.warn(f'no faces in: {fp_im}')
- # debug display
+ # if display optioned
if opt_display and len(bboxes):
- im_md = im_utils.resize(im, width=min(1200, opt_size[0]))
+ # draw each box
for bbox in bboxes:
- bbox_dim = bbox.to_dim(im_md.shape[:2][::-1])
- cv.rectangle(im_md, bbox_dim.pt_tl, bbox_dim.pt_br, (0,255,0), 3)
- cv.imshow('', im_md)
- while True:
- k = cv.waitKey(1) & 0xFF
- if k == 27 or k == ord('q'): # ESC
- cv.destroyAllWindows()
- sys.exit()
- elif k != 255:
- # any key to continue
- break
+ bbox_dim = bbox.to_dim(im_resized.shape[:2][::-1])
+ draw_utils.draw_bbox(im_resized, bbox_dim)
- # save date
+ # display and wait
+ cv.imshow('', im_resized)
+ display_utils.handle_keyboard()
+
+ # create DataFrame and save to CSV
file_utils.mkdirs(fp_out)
df = pd.DataFrame.from_dict(data)
df.index.name = 'index'
df.to_csv(fp_out)
+
# save script
- cmd_line = ' '.join(sys.argv)
- file_utils.write_text(cmd_line, '{}.sh'.format(fp_out)) \ No newline at end of file
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_vector.py b/megapixels/commands/cv/face_vector.py
index 7c03205c..9251c053 100644
--- a/megapixels/commands/cv/face_vector.py
+++ b/megapixels/commands/cv/face_vector.py
@@ -103,15 +103,17 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
# get face vector
bbox_dim = BBox.from_xywh(x, y, w, h).to_dim(dim) # convert to int real dimensions
# compute vec
- # padding=opt_padding not yet implemented in 19.16 but merged in master
+ # padding=opt_padding not yet implemented in dlib==19.16 but merged in master
vec = facerec.vec(im, bbox_dim, jitters=opt_jitters)
vec_str = ','.join([repr(x) for x in vec]) # convert to string for CSV
vecs.append( {'roi_index': roi_index, 'record_index': record_index, 'vec': vec_str})
- # create dataframe
+ # create DataFrame and save to CSV
df = pd.DataFrame.from_dict(vecs)
df.index.name = 'index'
- # save CSV
file_utils.mkdirs(fp_out)
- df.to_csv(fp_out) \ No newline at end of file
+ df.to_csv(fp_out)
+
+ # save script
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_vector_mt.py b/megapixels/commands/cv/face_vector_mt.py
deleted file mode 100644
index 412f9806..00000000
--- a/megapixels/commands/cv/face_vector_mt.py
+++ /dev/null
@@ -1,118 +0,0 @@
-"""
-Converts ROIs to face vector
-"""
-
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-
-@click.command()
-@click.option('-o', '--output', 'opt_fp_out', default=None,
- help='Override enum output filename CSV')
-@click.option('-m', '--media', 'opt_dir_media', default=None,
- help='Override enum media directory')
-@click.option('--data_store', 'opt_data_store',
- type=cfg.DataStoreVar,
- default=click_utils.get_default(types.DataStore.SSD),
- show_default=True,
- help=click_utils.show_help(types.Dataset))
-@click.option('--dataset', 'opt_dataset',
- type=cfg.DatasetVar,
- required=True,
- show_default=True,
- help=click_utils.show_help(types.Dataset))
-@click.option('--size', 'opt_size',
- type=(int, int), default=(300, 300),
- help='Output image size')
-@click.option('-j', '--jitters', 'opt_jitters', default=cfg.DLIB_FACEREC_JITTERS,
- help='Number of jitters')
-@click.option('-p', '--padding', 'opt_padding', default=cfg.DLIB_FACEREC_PADDING,
- help='Percentage padding')
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
- help='Slice list of files')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.option('-g', '--gpu', 'opt_gpu', default=0,
- help='GPU index')
-@click.pass_context
-def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
- opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding):
- """Converts face ROIs to vectors"""
-
- import sys
- import os
- from os.path import join
- from pathlib import Path
- from glob import glob
-
- from tqdm import tqdm
- import numpy as np
- import dlib # must keep a local reference for dlib
- import cv2 as cv
- import pandas as pd
-
- from app.models.bbox import BBox
- from app.models.data_store import DataStore
- from app.utils import logger_utils, file_utils, im_utils
- from app.processors import face_recognition
-
-
- # -------------------------------------------------
- # init here
-
- log = logger_utils.Logger.getLogger()
- # set data_store
- data_store = DataStore(opt_data_store, opt_dataset)
-
- # get filepath out
- fp_out = data_store.metadata(types.Metadata.FACE_VECTOR) if opt_fp_out is None else opt_fp_out
- if not opt_force and Path(fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
-
- # init face processors
- facerec = face_recognition.RecognitionDLIB()
-
- # load data
- fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
- df_record = pd.read_csv(fp_record).set_index('index')
- fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
- df_roi = pd.read_csv(fp_roi).set_index('index')
-
- if opt_slice:
- df_roi = df_roi[opt_slice[0]:opt_slice[1]]
-
- # -------------------------------------------------
- # process here
- df_img_groups = df_roi.groupby('record_index')
- log.debug('processing {:,} groups'.format(len(df_img_groups)))
-
- vecs = []
- for record_index, df_img_group in tqdm(df_img_groups):
- # make fp
- ds_record = df_record.iloc[record_index]
- fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
- im = cv.imread(fp_im)
- for roi_index, df_img in df_img_group.iterrows():
- # get bbox
- x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
- imw = df_img.image_width
- imh = df_img.image_height
- dim = im.shape[:2][::-1]
- # get face vector
- dim = (imw, imh)
- bbox_dim = BBox.from_xywh(x, y, w, h).to_dim(dim) # convert to int real dimensions
- # compute vec
- # padding=opt_padding not yet implemented in 19.16 but merged in master
- vec = facerec.vec(im, bbox_dim, jitters=opt_jitters)
- vec_str = ','.join([repr(x) for x in vec]) # convert to string for CSV
- vecs.append( {'roi_index': roi_index, 'record_index': record_index, 'vec': vec_str})
-
-
- # save date
- df = pd.DataFrame.from_dict(vecs)
- df.index.name = 'index'
- file_utils.mkdirs(fp_out)
- df.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/datasets/file_record.py b/megapixels/commands/datasets/file_record.py
index 355b22f2..d3f790d4 100644
--- a/megapixels/commands/datasets/file_record.py
+++ b/megapixels/commands/datasets/file_record.py
@@ -41,8 +41,8 @@ identity_sources = ['subdir', 'numeric']
@click.option('-f', '--force', 'opt_force', is_flag=True,
help='Force overwrite file')
@click.option('--identity', 'opt_identity', type=click.Choice(identity_sources),
- default='numeric',
- help='Identity source, blank for no identity')
+ required=True,
+ help='Identity source key')
@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
help='Use glob recursion (slower)')
@click.pass_context