author    Adam Harvey <adam@ahprojects.com>    2018-12-23 01:37:03 +0100
committer Adam Harvey <adam@ahprojects.com>    2018-12-23 01:37:03 +0100
commit    4452e02e8b04f3476273574a875bb60cfbb4568b (patch)
tree      3ffa44f9621b736250a8b94da14a187dc785c2fe /megapixels/app/processors/face_detector.py
parent    2a65f7a157bd4bace970cef73529867b0e0a374d (diff)
parent    5340bee951c18910fd764241945f1f136b5a22b4 (diff)
.
Diffstat (limited to 'megapixels/app/processors/face_detector.py')
-rw-r--r--  megapixels/app/processors/face_detector.py  132
1 file changed, 104 insertions, 28 deletions
diff --git a/megapixels/app/processors/face_detector.py b/megapixels/app/processors/face_detector.py
index 02d068dc..3a90c557 100644
--- a/megapixels/app/processors/face_detector.py
+++ b/megapixels/app/processors/face_detector.py
@@ -4,71 +4,140 @@ from pathlib import Path
import cv2 as cv
import numpy as np
-import dlib
-# import imutils
+import imutils
+import operator
from app.utils import im_utils, logger_utils
from app.models.bbox import BBox
from app.settings import app_cfg as cfg
+from app.settings import types
+
+
+class DetectorMTCNN:
+
+ # https://github.com/ipazc/mtcnn
+ # pip install mtcnn
+
+ dnn_size = (300, 300)
+
+ def __init__(self, size=(400,400)):
+ from mtcnn.mtcnn import MTCNN
+ self.detector = MTCNN()
+
+ def detect(self, im, size=(400,400), conf_thresh=None, pyramids=None, largest=False):
+ '''Detects faces using MTCNN and returns a list of BBox
+ :param im: (numpy.ndarray) image
+ :returns: (list) of BBox
+ '''
+ bboxes = []
+ #conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
+ #pyramids = self.pyramids if pyramids is None else pyramids
+ dnn_size = self.dnn_size if size is None else size
+
+ im = im_utils.resize(im, width=dnn_size[0], height=dnn_size[1])
+ dim = im.shape[:2][::-1]
+ dets = self.detector.detect_faces(im)
+ for det in dets:
+ rect = det['box']
+ #keypoints = det['keypoints'] # not using here. see 'face_landmarks.py'
+ bbox = BBox.from_xywh_dim(*rect, dim)
+ bboxes.append(bbox)
+
+ if largest and len(bboxes) > 1:
+ # only keep largest
+ bboxes.sort(key=operator.attrgetter('area'), reverse=True)
+ bboxes = [bboxes[0]]
+
+ return bboxes
+
+
+class DetectorHaar:
+
+ im_size = (400, 400)
+ cascade_name = types.HaarCascade.FRONTAL
+
+ def __init__(self, cascade=types.HaarCascade.FRONTAL):
+ self.log = logger_utils.Logger.getLogger()
+
+ def detect(self, im, scale_factor=1.05, overlaps=5):
+ pass
+
class DetectorDLIBCNN:
+
dnn_size = (300, 300)
pyramids = 0
conf_thresh = 0.85
- def __init__(self, opt_gpu):
+ def __init__(self, gpu=0):
+ import dlib
self.log = logger_utils.Logger.getLogger()
cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', '')
- os.environ['CUDA_VISIBLE_DEVICES'] = str(opt_gpu)
+ os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
self.log.info('load model: {}'.format(cfg.DIR_MODELS_DLIB_CNN))
self.detector = dlib.cnn_face_detection_model_v1(cfg.DIR_MODELS_DLIB_CNN)
os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices # reset
- def detect(self, im, opt_size=None, opt_conf_thresh=None, opt_pyramids=None):
- rois = []
- conf_thresh = self.conf_thresh if opt_conf_thresh is None else opt_conf_thresh
- pyramids = self.pyramids if opt_pyramids is None else opt_pyramids
- dnn_size = self.dnn_size if opt_size is None else opt_size
+ def detect(self, im, size=None, conf_thresh=None, pyramids=None, largest=False):
+ bboxes = []
+ conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
+ pyramids = self.pyramids if pyramids is None else pyramids
+ dnn_size = self.dnn_size if size is None else size
# resize image
im = im_utils.resize(im, width=dnn_size[0], height=dnn_size[1])
dim = im.shape[:2][::-1]
im = im_utils.bgr2rgb(im) # convert to RGB for dlib
# run detector
- mmod_rects = self.detector(im, 1)
+ mmod_rects = self.detector(im, pyramids)
# sort results
for mmod_rect in mmod_rects:
if mmod_rect.confidence > conf_thresh:
bbox = BBox.from_dlib_dim(mmod_rect.rect, dim)
- rois.append(bbox)
- return rois
+ bboxes.append(bbox)
+
+ if largest and len(bboxes) > 1:
+ # only keep largest
+ bboxes.sort(key=operator.attrgetter('area'), reverse=True)
+ bboxes = [bboxes[0]]
+
+ return bboxes
class DetectorDLIBHOG:
size = (320, 240)
pyramids = 0
+ conf_thresh = 0.85
def __init__(self):
+ import dlib
+ self.log = logger_utils.Logger.getLogger()
self.detector = dlib.get_frontal_face_detector()
- def detect(self, im, opt_size=None, opt_conf_thresh=None, opt_pyramids=0):
- conf_thresh = self.conf_thresh if opt_conf_thresh is None else opt_conf_thresh
- dnn_size = self.size if opt_size is None else opt_size
- pyramids = self.pyramids if opt_pyramids is None else opt_pyramids
+ def detect(self, im, size=None, conf_thresh=None, pyramids=0, largest=False):
+ conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
+ dnn_size = self.size if size is None else size
+ pyramids = self.pyramids if pyramids is None else pyramids
- im = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ im = im_utils.resize(im, width=dnn_size[0], height=dnn_size[1])
dim = im.shape[:2][::-1]
im = im_utils.bgr2rgb(im) # convert to RGB for dlib
hog_results = self.detector.run(im, pyramids)
- rois = []
+ bboxes = []
if len(hog_results[0]) > 0:
for rect, score, direction in zip(*hog_results):
- if score > opt_conf_thresh:
+ if score > conf_thresh:
bbox = BBox.from_dlib_dim(rect, dim)
- rois.append(bbox)
- return rois
+ bboxes.append(bbox)
+
+ if largest and len(bboxes) > 1:
+ # only keep largest
+ bboxes.sort(key=operator.attrgetter('area'), reverse=True)
+ bboxes = [bboxes[0]]
+
+ return bboxes
class DetectorCVDNN:
@@ -79,25 +148,32 @@ class DetectorCVDNN:
conf_thresh = 0.85
def __init__(self):
fp_prototxt = join(cfg.DIR_MODELS_CAFFE, 'face_detect', 'opencv_face_detector.prototxt')
fp_model = join(cfg.DIR_MODELS_CAFFE, 'face_detect', 'opencv_face_detector.caffemodel')
self.net = cv.dnn.readNet(fp_prototxt, fp_model)
self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
- def detect(self, im, opt_size=None, opt_conf_thresh=None):
+ def detect(self, im, size=None, conf_thresh=None, largest=False, pyramids=None):
"""Detects faces and returns (list) of (BBox)"""
- conf_thresh = self.conf_thresh if opt_conf_thresh is None else opt_conf_thresh
- dnn_size = self.size if opt_size is None else opt_size
+ conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
+ dnn_size = self.size if size is None else size
im = cv.resize(im, dnn_size)
blob = cv.dnn.blobFromImage(im, self.dnn_scale, dnn_size, self.dnn_mean)
self.net.setInput(blob)
net_outputs = self.net.forward()
- rois = []
+ bboxes = []
for i in range(0, net_outputs.shape[2]):
conf = net_outputs[0, 0, i, 2]
- if conf > opt_conf_thresh:
+ if conf > conf_thresh:
rect_norm = net_outputs[0, 0, i, 3:7]
- rois.append(BBox(*rect_norm))
- return rois
\ No newline at end of file
+ bboxes.append(BBox(*rect_norm))
+
+ if largest and len(bboxes) > 1:
+ # only keep largest
+ bboxes.sort(key=operator.attrgetter('area'), reverse=True)
+ bboxes = [bboxes[0]]
+
+ return bboxes
\ No newline at end of file
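
Usage sketch (not part of the commit): a minimal example of how the refactored detect() interface above might be called after this change. The detector class, the BBox results, and the keyword arguments come from the diff; the import path, the image file, and the surrounding script are assumptions.

import cv2 as cv
from app.processors.face_detector import DetectorDLIBHOG  # assumed import path, based on the repo layout

# Load an image with OpenCV (BGR order); the path is hypothetical
im = cv.imread('data/sample_face.jpg')

# size, conf_thresh and pyramids fall back to the class defaults when not supplied
detector = DetectorDLIBHOG()

# largest=True keeps only the biggest detected face, as implemented in this commit
bboxes = detector.detect(im, conf_thresh=0.85, largest=True)

for bbox in bboxes:
    print(bbox)  # each entry is a BBox built via BBox.from_dlib_dim

After this commit, DetectorMTCNN, DetectorDLIBCNN and DetectorCVDNN expose the same detect(im, size, conf_thresh, pyramids, largest) keyword interface, so the sketch above should apply to them with only the class name swapped.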