path: root/megapixels/app
author     jules@lens <julescarbon@gmail.com>    2019-04-18 16:55:14 +0200
committer  jules@lens <julescarbon@gmail.com>    2019-04-18 16:55:14 +0200
commit     2e4daed06264f3dc3bbabd8fa4fc0d8ceed4c5af (patch)
tree       1a17bb4459776ac91f7006a2a407ca12edd3471e /megapixels/app
parent     3d32e5b4ddbfbfe5d4abeda57ff200adf1532f4c (diff)
parent     f8012f88641b0bb378ba79393f277c8918ebe452 (diff)
Merge branch 'master' of asdf.us:megapixels_dev
Diffstat (limited to 'megapixels/app')
-rw-r--r--  megapixels/app/processors/person_detector.py   65
-rw-r--r--  megapixels/app/settings/app_cfg.py               5
-rw-r--r--  megapixels/app/settings/types.py                 3
-rw-r--r--  megapixels/app/site/parser.py                   13
-rw-r--r--  megapixels/app/utils/display_utils.py            7
-rw-r--r--  megapixels/app/utils/identity_utils.py          19
6 files changed, 106 insertions(+), 6 deletions(-)
diff --git a/megapixels/app/processors/person_detector.py b/megapixels/app/processors/person_detector.py
new file mode 100644
index 00000000..6daa8c40
--- /dev/null
+++ b/megapixels/app/processors/person_detector.py
@@ -0,0 +1,65 @@
+import sys
+import os
+from os.path import join
+from pathlib import Path
+
+import cv2 as cv
+import numpy as np
+import imutils
+import operator
+
+from app.utils import im_utils, logger_utils
+from app.models.bbox import BBox
+from app.settings import app_cfg as cfg
+from app.settings import types
+
+
+class DetectorCVDNN:
+
+  # MobileNet SSD
+  dnn_scale = 0.007843  # fixed
+  dnn_mean = (127.5, 127.5, 127.5)  # fixed
+  dnn_crop = False  # crop or force resize
+  blob_size = (300, 300)
+  conf = 0.95
+
+  # detect
+  CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
+             "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
+             "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
+             "sofa", "train", "tvmonitor"]
+
+  def __init__(self):
+    self.log = logger_utils.Logger.getLogger()
+    fp_prototxt = join(cfg.DIR_MODELS_CAFFE, 'mobilenet_ssd', 'MobileNetSSD_deploy.prototxt')
+    fp_model = join(cfg.DIR_MODELS_CAFFE, 'mobilenet_ssd', 'MobileNetSSD_deploy.caffemodel')
+    self.net = cv.dnn.readNet(fp_prototxt, fp_model)
+    self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
+    self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
+
+  def detect(self, im, conf=None, largest=False, pyramids=None, zone=False, blob_size=None):
+    """Detects bodies and returns (list) of (BBox)"""
+    conf = self.conf if conf is None else conf
+    blob_size = self.blob_size if blob_size is None else blob_size
+    im = cv.resize(im, blob_size)
+    dim = im.shape[:2][::-1]
+    blob = cv.dnn.blobFromImage(im, self.dnn_scale, dim, self.dnn_mean)
+    self.net.setInput(blob)
+    net_outputs = self.net.forward()
+
+    bboxes = []
+    for i in range(0, net_outputs.shape[2]):
+      det_conf = float(net_outputs[0, 0, i, 2])
+      bounds = np.array(net_outputs[0, 0, i, 3:7])  # bug: ensure all x,y within 1.0 ?
+      if det_conf > conf and np.all(bounds < 1):
+        idx = int(net_outputs[0, 0, i, 1])
+        if self.CLASSES[idx] == "person":
+          rect_norm = net_outputs[0, 0, i, 3:7]
+          bboxes.append(BBox(*rect_norm))
+
+    if largest and len(bboxes) > 1:
+      # only keep largest
+      bboxes.sort(key=operator.attrgetter('area'), reverse=True)
+      bboxes = [bboxes[0]]
+
+    return bboxes
\ No newline at end of file
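For orientation, a minimal usage sketch of the new detector follows. It is not part of the commit: the sample image path is invented, and it assumes the MobileNet SSD prototxt and caffemodel are present under cfg.DIR_MODELS_CAFFE/mobilenet_ssd.

    import cv2 as cv
    from app.processors.person_detector import DetectorCVDNN

    detector = DetectorCVDNN()                    # loads the Caffe MobileNet SSD
    im = cv.imread('samples/street.jpg')          # hypothetical test image
    bboxes = detector.detect(im, conf=0.8, largest=True)
    for bbox in bboxes:
      # each BBox holds the SSD's normalized (x1, y1, x2, y2) coordinates
      print(bbox)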
diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py
index 1eed1a41..98d36b5f 100644
--- a/megapixels/app/settings/app_cfg.py
+++ b/megapixels/app/settings/app_cfg.py
@@ -19,6 +19,7 @@ LogLevelVar = click_utils.ParamVar(types.LogLevel)
MetadataVar = click_utils.ParamVar(types.Metadata)
DatasetVar = click_utils.ParamVar(types.Dataset)
DataStoreVar = click_utils.ParamVar(types.DataStore)
+
# Face analysis
HaarCascadeVar = click_utils.ParamVar(types.HaarCascade)
FaceDetectNetVar = click_utils.ParamVar(types.FaceDetectNet)
@@ -27,6 +28,10 @@ FaceLandmark2D_5Var = click_utils.ParamVar(types.FaceLandmark2D_5)
FaceLandmark2D_68Var = click_utils.ParamVar(types.FaceLandmark2D_68)
FaceLandmark3D_68Var = click_utils.ParamVar(types.FaceLandmark3D_68)
+# Person/Body detector
+BodyDetectNetVar = click_utils.ParamVar(types.BodyDetectNet)
+
+
# base path
DIR_SELF = os.path.dirname(os.path.realpath(__file__))
DIR_ROOT = Path(DIR_SELF).parent.parent.parent
diff --git a/megapixels/app/settings/types.py b/megapixels/app/settings/types.py
index 3d7e96c0..2609ece7 100644
--- a/megapixels/app/settings/types.py
+++ b/megapixels/app/settings/types.py
@@ -59,6 +59,9 @@ class FaceDetectNet(Enum):
"""Scene text detector networks"""
HAAR, DLIB_CNN, DLIB_HOG, CVDNN, MTCNN_TF, MTCNN_PT, MTCNN_CAFFE = range(7)
+class BodyDetectNet(Enum):
+  CVDNN = 0
+
class FaceExtractor(Enum):
"""Type of face recognition feature extractor"""
# TODO deprecate DLIB resnet and use only CVDNN Caffe models
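A quick note on the new type: BodyDetectNetVar in app_cfg exposes BodyDetectNet to the CLI the same way FaceDetectNetVar wraps FaceDetectNet. A tiny illustrative check of the enum itself, assuming the single-member value shown above:

    from app.settings import types

    net = types.BodyDetectNet.CVDNN
    print(net.name, net.value)   # -> CVDNN 0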
diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py
index dc2a09f2..92d950f8 100644
--- a/megapixels/app/site/parser.py
+++ b/megapixels/app/site/parser.py
@@ -59,10 +59,13 @@ def parse_markdown(metadata, sections, s3_path, skip_h1=False):
if 'sidebar' not in section.lower():
current_group.append(section)
in_stats = True
+ if 'end sidebar' in section.lower():
+ current_group = [format_section(current_group, s3_path, 'right-sidebar', tag='div')]
+ in_stats = False
elif in_stats and not section.strip().startswith('## ') and 'end sidebar' not in section.lower():
current_group.append(section)
elif in_stats and section.strip().startswith('## ') or 'end sidebar' in section.lower():
- current_group = [format_section(current_group, s3_path, 'left-sidebar', tag='div')]
+ current_group = [format_section(current_group, s3_path, 'right-sidebar', tag='div')]
if 'end sidebar' not in section.lower():
current_group.append(section)
in_stats = False
@@ -145,8 +148,11 @@ def parse_markdown(metadata, sections, s3_path, skip_h1=False):
return '<a class="footnote_shim" name="{}_{}"> </a><a href="#{}" class="footnote" title="Footnote {}">{}</a>'.format(key, footnote_count, key, index, index)
key_regex = re.compile(key.replace('[', '\\[').replace('^', '\\^').replace(']', '\\]'))
content = key_regex.sub(footnote_tag, content)
- footnote_txt = footnote_txt.replace("{}_BACKLINKS".format(index), "".join(footnote_backlinks))
+ footnote_txt = footnote_txt.replace("{}_BACKLINKS".format(key), "".join(footnote_backlinks))
+ content += '<section>'
+ content += '<h3>References</h3>'
content += footnote_txt
+ content += '</section>'
return content
@@ -254,7 +260,8 @@ def format_footnotes(footnotes, s3_path):
continue
key, note = footnote.split(': ', 1)
footnote_index_lookup[key] = index
- footnote_list.append('<a name="{}" class="footnote_shim"></a><span class="backlinks">{}_BACKLINKS</span>'.format(key, index) + markdown(note))
+ note_markup = markdown(note).replace('<p>', '', 1).replace('</p>', '', 1)
+ footnote_list.append('{} <a name="{}" class="footnote_shim"></a><span class="backlinks">{}_BACKLINKS</span>'.format(index, key, key) + note_markup)
index += 1
footnote_txt = '<section><ul class="footnotes"><li>' + '</li><li>'.join(footnote_list) + '</li></ul></section>'
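For orientation, a rough sketch of the markup one footnote produces after these changes. The key, index, and note text are invented; the strings below simply mirror the format strings in the hunks above rather than quoting real site output.

    # hypothetical footnote '[^ref1]: Example source.' assigned index 1
    key, index, note_markup = '[^ref1]', 1, 'Example source.'
    item = '{} <a name="{}" class="footnote_shim"></a><span class="backlinks">{}_BACKLINKS</span>'.format(index, key, key) + note_markup
    footnote_txt = '<section><ul class="footnotes"><li>' + item + '</li></ul></section>'
    # parse_markdown later swaps '[^ref1]_BACKLINKS' for the collected backlink anchors
    # and wraps the whole list in <section><h3>References</h3> ... </section>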
diff --git a/megapixels/app/utils/display_utils.py b/megapixels/app/utils/display_utils.py
index 43328ae9..8e265ae7 100644
--- a/megapixels/app/utils/display_utils.py
+++ b/megapixels/app/utils/display_utils.py
@@ -19,3 +19,10 @@ def handle_keyboard(delay_amt=1):
break
elif k != 255:
log.debug(f'k: {k}')
+
+def handle_keyboard_video(delay_amt=1):
+  key = cv.waitKey(delay_amt) & 0xFF
+  # if the `q` key was pressed, close all windows and exit
+  if key == ord("q"):
+    cv.destroyAllWindows()
+    sys.exit()
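A hedged sketch of how the new helper could sit in a capture loop. The VideoCapture source and window name are assumptions; note that handle_keyboard_video exits the process rather than breaking the loop, and it relies on sys being imported in display_utils.

    import cv2 as cv
    from app.utils import display_utils

    cap = cv.VideoCapture(0)   # hypothetical webcam source
    while True:
      ok, frame = cap.read()
      if not ok:
        break
      cv.imshow('preview', frame)
      display_utils.handle_keyboard_video(delay_amt=1)   # press 'q' to quit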
diff --git a/megapixels/app/utils/identity_utils.py b/megapixels/app/utils/identity_utils.py
index 775652dc..5855fbbd 100644
--- a/megapixels/app/utils/identity_utils.py
+++ b/megapixels/app/utils/identity_utils.py
@@ -29,6 +29,13 @@ def names_match_strict(a, b):
return len(clean_a) == len(clean_b) and letter_match(clean_a, clean_b) and letter_match(clean_b, clean_a)
+def sanitize_name(name, as_str=False):
+  splits = [unidecode.unidecode(x.strip().lower()) for x in name.strip().split(' ')]
+  if as_str:
+    return ' '.join(splits)
+  else:
+    return splits
+
'''
class Dataset(Enum):
LFW, VGG_FACE, VGG_FACE2, MSCELEB, UCCS, UMD_FACES, SCUT_FBP, UCF_SELFIE, UTK, \
@@ -106,12 +113,18 @@ def get_names(opt_dataset, opt_data_store=types.DataStore.HDD):
def similarity(a, b):
return difflib.SequenceMatcher(a=a.lower(), b=b.lower()).ratio()
-def names_match(name_a, name_b, threshold=0.9, as_float=False, compound_score=False):
+def names_match(name_a, name_b, threshold=0.9, as_float=False, compound_score=False, name_a_pre=False, name_b_pre=False):
'''Returns boolean if names are similar enough
'''
# strip spaces and split names into list of plain text words
- name_a_clean = [unidecode.unidecode(x.strip().lower()) for x in name_a.strip().split(' ')]
- name_b_clean = [unidecode.unidecode(x.strip().lower()) for x in name_b.strip().split(' ')]
+  if name_a_pre:
+    name_a_clean = name_a
+  else:
+    name_a_clean = [unidecode.unidecode(x.strip().lower()) for x in name_a.strip().split(' ')]
+  if name_b_pre:
+    name_b_clean = name_b
+  else:
+    name_b_clean = [unidecode.unidecode(x.strip().lower()) for x in name_b.strip().split(' ')]
# assign short long vars
len_a = len(name_a_clean)
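Finally, a small sketch of how the pre-sanitized path might be used; the example names are illustrative. sanitize_name produces the same token lists names_match builds internally, so callers comparing one name against many can clean it once and pass name_a_pre/name_b_pre to skip the per-call unidecode work.

    from app.utils import identity_utils

    a = identity_utils.sanitize_name('José Martínez')   # -> ['jose', 'martinez']
    b = identity_utils.sanitize_name('Jose Martinez')
    # flag both inputs as already-sanitized token lists
    match = identity_utils.names_match(a, b, name_a_pre=True, name_b_pre=True)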