'''Face detection wrappers (MTCNN, Haar cascade, dlib CNN/HOG, OpenCV DNN) that return lists of BBox'''

import sys
import os
import operator
from os.path import join
from pathlib import Path

import cv2 as cv
import numpy as np
import imutils

from app.utils import im_utils, logger_utils
from app.models.bbox import BBox
from app.settings import app_cfg as cfg
from app.settings import types


class DetectorMTCNN:
  # https://github.com/ipazc/mtcnn
  # pip install mtcnn

  dnn_size = (300, 300)

  def __init__(self, size=(400, 400), gpu=0):
    self.log = logger_utils.Logger.getLogger()
    device_cur = os.getenv('CUDA_VISIBLE_DEVICES', '')
    self.log.info(f'Change CUDA_VISIBLE_DEVICES from "{device_cur}" to "{gpu}"')
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    # import here so TensorFlow initializes on the selected GPU
    from mtcnn.mtcnn import MTCNN
    self.detector = MTCNN()
    os.environ['CUDA_VISIBLE_DEVICES'] = device_cur  # reset

  def detect(self, im, size=(400, 400), conf_thresh=None, pyramids=None, largest=False, zone=None):
    '''Detects faces using MTCNN and returns (list) of BBox
    :param im: (numpy.ndarray) BGR image
    :returns (list) of BBox
    '''
    bboxes = []
    dnn_size = self.dnn_size if size is None else size
    im = im_utils.resize(im, width=dnn_size[0], height=dnn_size[1])
    dim = im.shape[:2][::-1]
    dets = self.detector.detect_faces(im)  # pyramids is unused here; kept for API parity

    for det in dets:
      if conf_thresh is not None and det['confidence'] < conf_thresh:
        continue
      rect = det['box']
      # keypoints = det['keypoints']  # not used here. see 'face_landmarks.py'
      bbox = BBox.from_xywh_dim(*rect, dim)
      bboxes.append(bbox)

    if zone:
      # filter to keep only faces inside the zone margins (normalized coords)
      bboxes = [b for b in bboxes if zone[0] < b.cx < 1.0 - zone[0]
                and zone[1] < b.cy < 1.0 - zone[1]]

    if largest and len(bboxes) > 1:
      # only keep the largest face
      bboxes.sort(key=operator.attrgetter('area'), reverse=True)
      bboxes = [bboxes[0]]

    return bboxes


class DetectorHaar:

  im_size = (400, 400)
  cascade_name = types.HaarCascade.FRONTAL

  def __init__(self, cascade=types.HaarCascade.FRONTAL):
    self.log = logger_utils.Logger.getLogger()
    self.cascade_name = cascade
    # NOTE: assumes OpenCV's bundled frontal-face cascade (opencv-python ships cv.data.haarcascades);
    # substitute the project's cascade path mapping for other cascade types
    fp_cascade = join(cv.data.haarcascades, 'haarcascade_frontalface_default.xml')
    self.detector = cv.CascadeClassifier(fp_cascade)

  def detect(self, im, scale_factor=1.05, overlaps=5):
    '''Detects faces using a Haar cascade and returns (list) of BBox'''
    dim = im.shape[:2][::-1]
    im_gray = cv.cvtColor(im, cv.COLOR_BGR2GRAY)
    rects = self.detector.detectMultiScale(im_gray, scaleFactor=scale_factor, minNeighbors=overlaps)
    return [BBox.from_xywh_dim(*rect, dim) for rect in rects]


class DetectorDLIBCNN:

  pyramids = 0
  conf_thresh = 0.85

  def __init__(self, gpu=0):
    import dlib
    self.log = logger_utils.Logger.getLogger()
    device_cur = os.getenv('CUDA_VISIBLE_DEVICES', '')
    if dlib.DLIB_USE_CUDA and gpu < 0:
      self.log.error('dlib was compiled with CUDA but CPU was selected. Use gpu >= 0 when dlib.DLIB_USE_CUDA is True')
      sys.exit()
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    self.log.info('load model: {}'.format(cfg.DIR_MODELS_DLIB_CNN))
    self.detector = dlib.cnn_face_detection_model_v1(cfg.DIR_MODELS_DLIB_CNN)
    os.environ['CUDA_VISIBLE_DEVICES'] = device_cur  # reset

  def detect(self, im, conf_thresh=None, pyramids=None, largest=False, zone=None):
    '''Detects faces using the dlib CNN (MMOD) detector and returns (list) of BBox'''
    bboxes = []
    conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
    pyramids = self.pyramids if pyramids is None else pyramids
    dim = im.shape[:2][::-1]
    im = im_utils.bgr2rgb(im)  # convert to RGB for dlib

    # run detector
    mmod_rects = self.detector(im, pyramids)

    # filter detections by confidence
    for mmod_rect in mmod_rects:
      if mmod_rect.confidence > conf_thresh:
        bbox = BBox.from_dlib_dim(mmod_rect.rect, dim)
        bboxes.append(bbox)

    if zone:
      # filter to keep only faces inside the zone margins (normalized coords)
      bboxes = [b for b in bboxes if zone[0] < b.cx < 1.0 - zone[0]
                and zone[1] < b.cy < 1.0 - zone[1]]

    if largest and len(bboxes) > 1:
      # only keep the largest face
      bboxes.sort(key=operator.attrgetter('area'), reverse=True)
      bboxes = [bboxes[0]]

    return bboxes


class DetectorDLIBHOG:

  pyramids = 0
  conf_thresh = 0.85

  def __init__(self):
    import dlib
    self.log = logger_utils.Logger.getLogger()
    self.detector = dlib.get_frontal_face_detector()

  def detect(self, im, conf_thresh=None, pyramids=None, largest=False, zone=None):
    '''Detects faces using the dlib HOG detector and returns (list) of BBox'''
    conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
    pyramids = self.pyramids if pyramids is None else pyramids
    dim = im.shape[:2][::-1]
    im = im_utils.bgr2rgb(im)  # convert to RGB for dlib

    # run detector; returns (rects, scores, sub-detector indices)
    hog_results = self.detector.run(im, pyramids)

    bboxes = []
    if len(hog_results[0]) > 0:
      for rect, score, direction in zip(*hog_results):
        if score > conf_thresh:
          bbox = BBox.from_dlib_dim(rect, dim)
          bboxes.append(bbox)

    if zone:
      # filter to keep only faces inside the zone margins (normalized coords)
      bboxes = [b for b in bboxes if zone[0] < b.cx < 1.0 - zone[0]
                and zone[1] < b.cy < 1.0 - zone[1]]

    if largest and len(bboxes) > 1:
      # only keep the largest face
      bboxes.sort(key=operator.attrgetter('area'), reverse=True)
      bboxes = [bboxes[0]]

    return bboxes


class DetectorCVDNN:

  dnn_scale = 1.0  # fixed
  dnn_mean = (104.0, 177.0, 123.0)  # fixed
  dnn_crop = False  # crop or force resize
  blob_size = (300, 300)
  conf_thresh = 0.95

  def __init__(self):
    self.log = logger_utils.Logger.getLogger()
    fp_prototxt = join(cfg.DIR_MODELS_CAFFE, 'face_detect', 'opencv_face_detector.prototxt')
    fp_model = join(cfg.DIR_MODELS_CAFFE, 'face_detect', 'opencv_face_detector.caffemodel')
    self.net = cv.dnn.readNet(fp_prototxt, fp_model)
    self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
    self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

  def detect(self, im, conf_thresh=None, largest=False, pyramids=None, zone=None):
    '''Detects faces using the OpenCV DNN Caffe model and returns (list) of BBox'''
    conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
    im = cv.resize(im, self.blob_size)
    # swapRB=False (Caffe model expects BGR), crop per class setting
    blob = cv.dnn.blobFromImage(im, self.dnn_scale, self.blob_size, self.dnn_mean, False, self.dnn_crop)
    self.net.setInput(blob)
    net_outputs = self.net.forward()

    bboxes = []
    for i in range(net_outputs.shape[2]):
      conf = net_outputs[0, 0, i, 2]
      if conf > conf_thresh:
        rect_norm = net_outputs[0, 0, i, 3:7]  # normalized (x1, y1, x2, y2)
        bboxes.append(BBox(*rect_norm))

    if zone:
      # filter to keep only faces inside the zone margins (normalized coords)
      bboxes = [b for b in bboxes if zone[0] < b.cx < 1.0 - zone[0]
                and zone[1] < b.cy < 1.0 - zone[1]]

    if largest and len(bboxes) > 1:
      # only keep the largest face
      bboxes.sort(key=operator.attrgetter('area'), reverse=True)
      bboxes = [bboxes[0]]

    return bboxes
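

# ---------------------------------------------------------------------------
# Usage sketch (assumption: run as a script with an optional image path
# argument, the app package importable, and the model files from app_cfg
# present on disk). Illustrates the shared detect() interface; the file name
# 'test.jpg' is a placeholder, not part of the project.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
  # read a test image (BGR, as returned by OpenCV)
  fp_im = sys.argv[1] if len(sys.argv) > 1 else 'test.jpg'
  im = cv.imread(fp_im)

  # every detector returns a list of BBox; largest=True keeps only the biggest face
  detector = DetectorCVDNN()
  bboxes = detector.detect(im, conf_thresh=0.9, largest=True)
  print(f'{len(bboxes)} face(s) detected in {fp_im}')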