summaryrefslogtreecommitdiff
path: root/megapixels/app
diff options
context:
space:
mode:
Diffstat (limited to 'megapixels/app')
-rw-r--r--megapixels/app/processors/person_detector.py65
-rw-r--r--megapixels/app/settings/app_cfg.py5
-rw-r--r--megapixels/app/settings/types.py3
-rw-r--r--megapixels/app/utils/display_utils.py7
-rw-r--r--megapixels/app/utils/identity_utils.py19
5 files changed, 96 insertions, 3 deletions
diff --git a/megapixels/app/processors/person_detector.py b/megapixels/app/processors/person_detector.py
new file mode 100644
index 00000000..6daa8c40
--- /dev/null
+++ b/megapixels/app/processors/person_detector.py
@@ -0,0 +1,65 @@
+import sys
+import os
+from os.path import join
+from pathlib import Path
+
+import cv2 as cv
+import numpy as np
+import imutils
+import operator
+
+from app.utils import im_utils, logger_utils
+from app.models.bbox import BBox
+from app.settings import app_cfg as cfg
+from app.settings import types
+
+
class DetectorCVDNN:
  """Person detector using OpenCV DNN with a MobileNet-SSD Caffe model."""

  # MobileNet SSD preprocessing constants
  dnn_scale = 0.007843  # 1/127.5, fixed for this model
  dnn_mean = (127.5, 127.5, 127.5)  # fixed
  dnn_crop = False  # crop or force resize (passed to blobFromImage)
  blob_size = (300, 300)  # network input size
  conf = 0.95  # default confidence threshold

  # PASCAL VOC class labels the model was trained on; index order matters
  CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
    "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
    "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
    "sofa", "train", "tvmonitor"]

  def __init__(self):
    """Loads the MobileNet SSD Caffe model onto the OpenCV CPU backend."""
    self.log = logger_utils.Logger.getLogger()
    fp_prototxt = join(cfg.DIR_MODELS_CAFFE, 'mobilenet_ssd', 'MobileNetSSD_deploy.prototxt')
    fp_model = join(cfg.DIR_MODELS_CAFFE, 'mobilenet_ssd', 'MobileNetSSD_deploy.caffemodel')
    self.net = cv.dnn.readNet(fp_prototxt, fp_model)
    self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
    self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

  def detect(self, im, conf=None, largest=False, pyramids=None, zone=False, blob_size=None):
    """Detects bodies and returns (list) of (BBox)

    :param im: (numpy.ndarray) BGR image
    :param conf: (float) confidence threshold; defaults to self.conf
    :param largest: (bool) if True, keep only the largest detection
    :param pyramids: unused; reserved for future image-pyramid support
    :param zone: unused; reserved for future zone filtering
    :param blob_size: (tuple) network input size; defaults to self.blob_size
    :returns: (list) of (BBox) with normalized coordinates
    """
    conf = self.conf if conf is None else conf
    blob_size = self.blob_size if blob_size is None else blob_size
    # blobFromImage resizes to blob_size itself; no separate cv.resize needed
    blob = cv.dnn.blobFromImage(im, self.dnn_scale, blob_size, self.dnn_mean, crop=self.dnn_crop)
    self.net.setInput(blob)
    net_outputs = self.net.forward()

    bboxes = []
    for i in range(net_outputs.shape[2]):
      det_conf = float(net_outputs[0, 0, i, 2])
      if det_conf <= conf:
        continue
      idx = int(net_outputs[0, 0, i, 1])
      if self.CLASSES[idx] != "person":
        continue
      # clamp normalized coords into [0, 1] so edge-touching detections
      # are kept (previously any bbox with a coord >= 1.0 was discarded)
      rect_norm = np.clip(net_outputs[0, 0, i, 3:7], 0.0, 1.0)
      bboxes.append(BBox(*rect_norm))

    if largest and len(bboxes) > 1:
      # only keep largest; max() avoids sorting the whole list
      bboxes = [max(bboxes, key=operator.attrgetter('area'))]

    return bboxes
diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py
index 1eed1a41..98d36b5f 100644
--- a/megapixels/app/settings/app_cfg.py
+++ b/megapixels/app/settings/app_cfg.py
@@ -19,6 +19,7 @@ LogLevelVar = click_utils.ParamVar(types.LogLevel)
MetadataVar = click_utils.ParamVar(types.Metadata)
DatasetVar = click_utils.ParamVar(types.Dataset)
DataStoreVar = click_utils.ParamVar(types.DataStore)
+
# Face analysis
HaarCascadeVar = click_utils.ParamVar(types.HaarCascade)
FaceDetectNetVar = click_utils.ParamVar(types.FaceDetectNet)
@@ -27,6 +28,10 @@ FaceLandmark2D_5Var = click_utils.ParamVar(types.FaceLandmark2D_5)
FaceLandmark2D_68Var = click_utils.ParamVar(types.FaceLandmark2D_68)
FaceLandmark3D_68Var = click_utils.ParamVar(types.FaceLandmark3D_68)
+# Person/Body detector
+BodyDetectNetVar = click_utils.ParamVar(types.BodyDetectNet)
+
+
# base path
DIR_SELF = os.path.dirname(os.path.realpath(__file__))
DIR_ROOT = Path(DIR_SELF).parent.parent.parent
diff --git a/megapixels/app/settings/types.py b/megapixels/app/settings/types.py
index 3d7e96c0..2609ece7 100644
--- a/megapixels/app/settings/types.py
+++ b/megapixels/app/settings/types.py
@@ -59,6 +59,9 @@ class FaceDetectNet(Enum):
"""Scene text detector networks"""
HAAR, DLIB_CNN, DLIB_HOG, CVDNN, MTCNN_TF, MTCNN_PT, MTCNN_CAFFE = range(7)
class BodyDetectNet(Enum):
  """Body/person detector networks"""
  # int value for consistency with FaceDetectNet (was `range(1)`, which
  # made the member's value a range object instead of an int)
  CVDNN = 0
class FaceExtractor(Enum):
"""Type of face recognition feature extractor"""
# TODO deprecate DLIB resnet and use only CVDNN Caffe models
diff --git a/megapixels/app/utils/display_utils.py b/megapixels/app/utils/display_utils.py
index 43328ae9..8e265ae7 100644
--- a/megapixels/app/utils/display_utils.py
+++ b/megapixels/app/utils/display_utils.py
@@ -19,3 +19,10 @@ def handle_keyboard(delay_amt=1):
break
elif k != 255:
log.debug(f'k: {k}')
+
def handle_keyboard_video(delay_amt=1):
  """Polls the keyboard during video playback and exits the program on 'q'.

  :param delay_amt: (int) milliseconds to wait for a keypress
  """
  # bug fix: delay_amt was ignored (waitKey was hard-coded to 1)
  key = cv.waitKey(delay_amt) & 0xFF
  # if the `q` key was pressed, close all windows and exit
  # NOTE(review): relies on `sys` being imported at module level — confirm
  if key == ord("q"):
    cv.destroyAllWindows()
    sys.exit()
diff --git a/megapixels/app/utils/identity_utils.py b/megapixels/app/utils/identity_utils.py
index 775652dc..5855fbbd 100644
--- a/megapixels/app/utils/identity_utils.py
+++ b/megapixels/app/utils/identity_utils.py
@@ -29,6 +29,13 @@ def names_match_strict(a, b):
return len(clean_a) == len(clean_b) and letter_match(clean_a, clean_b) and letter_match(clean_b, clean_a)
def sanitize_name(name, as_str=False):
  """Normalizes a person name into lowercase ASCII tokens.

  :param name: (str) raw name, possibly with accents and extra whitespace
  :param as_str: (bool) if True, return a single space-joined string
  :returns: (list|str) cleaned name tokens, or a joined string
  """
  # str.split() with no separator collapses runs of whitespace and drops
  # empty tokens (split(' ') produced '' tokens on double spaces)
  splits = [unidecode.unidecode(x.lower()) for x in name.split()]
  if as_str:
    return ' '.join(splits)
  return splits
+
'''
class Dataset(Enum):
LFW, VGG_FACE, VGG_FACE2, MSCELEB, UCCS, UMD_FACES, SCUT_FBP, UCF_SELFIE, UTK, \
@@ -106,12 +113,18 @@ def get_names(opt_dataset, opt_data_store=types.DataStore.HDD):
def similarity(a, b):
  """Returns a 0..1 similarity ratio between two strings, case-insensitively."""
  matcher = difflib.SequenceMatcher(a=a.lower(), b=b.lower())
  return matcher.ratio()
-def names_match(name_a, name_b, threshold=0.9, as_float=False, compound_score=False):
+def names_match(name_a, name_b, threshold=0.9, as_float=False, compound_score=False, name_a_pre=False, name_b_pre=False):
'''Returns boolean if names are similar enough
'''
# strip spaces and split names into list of plain text words
- name_a_clean = [unidecode.unidecode(x.strip().lower()) for x in name_a.strip().split(' ')]
- name_b_clean = [unidecode.unidecode(x.strip().lower()) for x in name_b.strip().split(' ')]
+ if name_a_pre:
+ name_a_clean = name_a
+ else:
+ name_a_clean = [unidecode.unidecode(x.strip().lower()) for x in name_a.strip().split(' ')]
+ if name_b_pre:
+ name_b_clean = name_b
+ else:
+ name_b_clean = [unidecode.unidecode(x.strip().lower()) for x in name_b.strip().split(' ')]
# assign short long vars
len_a = len(name_a_clean)