diff options
Diffstat (limited to 'megapixels/app')
| -rw-r--r-- | megapixels/app/models/bbox.py | 16 | ||||
| -rw-r--r-- | megapixels/app/processors/face_extractor.py | 127 | ||||
| -rw-r--r-- | megapixels/app/settings/app_cfg.py | 6 | ||||
| -rw-r--r-- | megapixels/app/settings/types.py | 7 |
4 files changed, 153 insertions, 3 deletions
diff --git a/megapixels/app/models/bbox.py b/megapixels/app/models/bbox.py index 40874691..f1216698 100644 --- a/megapixels/app/models/bbox.py +++ b/megapixels/app/models/bbox.py @@ -130,6 +130,22 @@ class BBox: # ----------------------------------------------------------------- # Modify + def expand(self, per): + """Expands BBox by percentage + :param per: (float) percentage to expand 0.0 - 1.0 + :returns (BBox) expanded + """ + # expand + dw, dh = [(self._width * per), (self._height * per)] + r = list(np.array(self._rect) + np.array([-dw, -dh, dw, dh])) + # threshold expanded rectangle + r[0] = max(r[0], 0.0) + r[1] = max(r[1], 0.0) + r[2] = min(r[2], 1.0) + r[3] = min(r[3], 1.0) + return BBox(*r) + def expand_dim(self, amt, bounds): """Expands BBox within dim :param box: (tuple) left, top, right, bottom diff --git a/megapixels/app/processors/face_extractor.py b/megapixels/app/processors/face_extractor.py new file mode 100644 index 00000000..2666e090 --- /dev/null +++ b/megapixels/app/processors/face_extractor.py @@ -0,0 +1,127 @@ +import os +from os.path import join +from pathlib import Path + +import cv2 as cv +import numpy as np +import dlib +import imutils + +from app.utils import im_utils, logger_utils +from app.models.bbox import BBox +from app.settings import app_cfg as cfg +from app.settings import types + +def similarity(query_enc, known_enc): + return np.linalg.norm(query_enc - known_enc, axis=1) + +def flatten(vec): + '''Converts N-D vector into a flattened list for CSV + :param vec: (list) a feature vector as list of floats + :returns dict item for each point (eg {'d1':0.28442156, 'd2': 0.1868632}) + ''' + vec_flat = {} + for idx, val in enumerate(vec, 1): + vec_flat[f'd{idx}'] = val + return vec_flat + + + +class Extractor: + + n_dim = None # override + + def __init__(self): + self.log = logger_utils.Logger.getLogger() + + def flatten(self, vec): + '''Converts N-D vector into a flattened list for 
CSV + :param vec: (list) a feature vector as list of floats + :returns dict item for each point (eg {'d1':0.28442156, 'd2': 0.1868632}) + ''' + vec_flat = {} + for idx, val in enumerate(vec, 1): + vec_flat[f'd{idx}'] = val + return vec_flat + + def unflatten_df(self, df): + # convert flattened dN columns back into an n_dim-length vector + return [df[f'd{i}'] for i in range(1, self.n_dim + 1)] + + +class ExtractorVGG(Extractor): + + # https://github.com/ox-vgg/vgg_face2 + # Uses OpenCV DNN to extract feature vector for VGG Face 2 models + n_dim = 256 + dnn_dim = (224,224) + dnn_mean = (91.4953, 103.8827, 131.0912) + + def __init__(self): + super().__init__() + fp_model = '/data_store_hdd/apps/megapixels/models/caffe/vgg_face2/resnet50_256_caffe/resnet50_256.caffemodel' + fp_prototxt = '/data_store_hdd/apps/megapixels/models/caffe/vgg_face2/resnet50_256_caffe/resnet50_256.prototxt' + self.dnn = cv.dnn.readNetFromCaffe(fp_prototxt, fp_model) + self.feat_layer = self.dnn.getLayerNames()[-2] + + def extract(self, im, bbox_norm, padding=0.3): + '''Extracts feature vector for face crop + :param im: (numpy.ndarray) BGR image + :param bbox_norm: (BBox) normalized + :param padding: (float) percent to extend ROI + :returns (list) of (float)''' + + bbox_ext = bbox_norm.expand(padding) + dim = im.shape[:2][::-1] + bbox_ext_dim = bbox_ext.to_dim(dim) + x1,y1,x2,y2 = bbox_ext_dim.to_xyxy() + im = im[y1:y2, x1:x2] + # According to VGG, model trained using Bilinear interpolation (INTER_LINEAR) + im = cv.resize(im, self.dnn_dim, interpolation=cv.INTER_LINEAR) + blob = cv.dnn.blobFromImage(im, 1.0, self.dnn_dim, self.dnn_mean) + self.dnn.setInput(blob) + vec = np.array(self.dnn.forward(self.feat_layer)[0]) + vec_norm = np.array(vec)/np.linalg.norm(vec) # normalize + return vec_norm + + +class ExtractorDLIB(Extractor): + + # https://github.com/davisking/dlib/blob/master/python_examples/face_recognition.py + # facerec.compute_face_descriptor(img, shape, 100, 0.25) + # padding=opt_padding not yet implemented in dlib==19.16 but merged in 
master + n_dim = 128 + process_width = 100 + + def __init__(self, gpu=0, jitters=cfg.DLIB_FACEREC_JITTERS): + super().__init__() + self.num_jitters = jitters + # set and swap GPU visibility + if gpu > -1: + cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', '') + os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu) + self.predictor = dlib.shape_predictor(cfg.DIR_MODELS_DLIB_5PT) + self.facerec = dlib.face_recognition_model_v1(cfg.DIR_MODELS_DLIB_FACEREC_RESNET) + # unset and swap GPU visibility + if gpu > -1: + os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices # reset GPU env + + + def extract(self, im, bbox_norm): + '''Converts image and bbox into 128d vector + :param im: (numpy.ndarray) BGR image + :param bbox_norm: (BBox) normalized + ''' + # scale the image so the face is always 100x100 pixels + dim = im.shape[:2][::-1] + bbox_dim = bbox_norm.to_dim(dim) + scale = self.process_width / bbox_dim.width + im = cv.resize(im, None, fx=scale, fy=scale, interpolation=cv.INTER_LANCZOS4) + bbox_dim = bbox_norm.to_dim(im.shape[:2][::-1]) # recompute bbox in resized-image coords + bbox_dim_dlib = bbox_dim.to_dlib() + face_shape = self.predictor(im, bbox_dim_dlib) + # this is only in dlib version 19.6++? 
+ # vec = self.facerec.compute_face_descriptor(im, face_shape, self.num_jitters, self.padding) + # vectors are already normalized + vec = self.facerec.compute_face_descriptor(im, face_shape, self.num_jitters) + return vec diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py index fea47572..14e2493c 100644 --- a/megapixels/app/settings/app_cfg.py +++ b/megapixels/app/settings/app_cfg.py @@ -21,6 +21,7 @@ DataStoreVar = click_utils.ParamVar(types.DataStore) # Face analysis HaarCascadeVar = click_utils.ParamVar(types.HaarCascade) FaceDetectNetVar = click_utils.ParamVar(types.FaceDetectNet) +FaceExtractorVar = click_utils.ParamVar(types.FaceExtractor) FaceLandmark2D_5Var = click_utils.ParamVar(types.FaceLandmark2D_5) FaceLandmark2D_68Var = click_utils.ParamVar(types.FaceLandmark2D_68) FaceLandmark3D_68Var = click_utils.ParamVar(types.FaceLandmark3D_68) @@ -41,7 +42,7 @@ DIR_PEOPLE = 'people' DIR_MODELS_CAFFE = join(DIR_MODELS,'caffe') DIR_MODELS_DARKNET = join(DIR_MODELS,'darknet') DIR_MODELS_DARKNET_PJREDDIE = join(DIR_MODELS_DARKNET, 'pjreddie') -DIR_MODELS_PYTORCH = join(DIR_MODELS,'pytorch') +DIR_MODELS_PYTORCH = join(DIR_MODELS,'pytorch') DIR_MODELS_TORCH = join(DIR_MODELS,'torch') DIR_MODELS_MXNET = join(DIR_MODELS,'mxnet') DIR_MODELS_KERAS = join(DIR_MODELS,'keras') @@ -96,7 +97,8 @@ HASH_TREE_DEPTH = 3 HASH_BRANCH_SIZE = 3 DLIB_FACEREC_JITTERS = 5 # number of face recognition jitters -DLIB_FACEREC_PADDING = 0.25 # default dlib +#DLIB_FACEREC_PADDING = 0.25 # default dlib +FACEREC_PADDING = 0.3 # VGG FACE2 recommended POSE_MINMAX_YAW = (-25,25) POSE_MINMAX_ROLL = (-15,15) diff --git a/megapixels/app/settings/types.py b/megapixels/app/settings/types.py index 1d77fdbd..940c8b6d 100644 --- a/megapixels/app/settings/types.py +++ b/megapixels/app/settings/types.py @@ -54,7 +54,12 @@ class Dataset(Enum): # -------------------------------------------------------------------- class FaceDetectNet(Enum): """Face detector networks""" 
- HAAR, DLIB_CNN, DLIB_HOG, CVDNN, MTCNN = range(5) + HAAR, DLIB_CNN, DLIB_HOG, CVDNN, MTCNN_TF, MTCNN_PT, MTCNN_CAFFE = range(7) + +class FaceExtractor(Enum): + """Type of face recognition feature extractor""" + # TODO deprecate DLIB resnet and use only CVDNN Caffe models + DLIB_RESNET, VGG_FACE2 = range(2) class FaceLandmark2D_5(Enum): DLIB, MTCNN = range(2) |
