path: root/megapixels/notebooks/utils/HaarSaliency.py
author     adamhrv <adam@ahprojects.com>  2018-11-19 12:46:02 +0100
committer  adamhrv <adam@ahprojects.com>  2018-11-19 12:46:02 +0100
commit     4a6e67760692b9426ccfdc529619677016a57e6a (patch)
tree       6d50bd77b69f4f3bead90ca50816ab26de82469d /megapixels/notebooks/utils/HaarSaliency.py
parent     8c52d20132be81121d1c789951c2571bfa667119 (diff)
move notebooks
Diffstat (limited to 'megapixels/notebooks/utils/HaarSaliency.py')
-rw-r--r--  megapixels/notebooks/utils/HaarSaliency.py  77
1 file changed, 77 insertions, 0 deletions
diff --git a/megapixels/notebooks/utils/HaarSaliency.py b/megapixels/notebooks/utils/HaarSaliency.py
new file mode 100644
index 00000000..6a511efb
--- /dev/null
+++ b/megapixels/notebooks/utils/HaarSaliency.py
@@ -0,0 +1,77 @@
+import os
+from os.path import join
+import cv2
+import numpy as np
+import xml.etree.ElementTree
+class HaarSaliency:
+
+    # Input images should be between 320x240 and 640x480
+    classifiers = []
+    face_matrix = []
+    flags = 0
+    nstages = 0
+
+    def __init__(self, cascade, min_size=60, max_size=400,
+                 face_neighbors=3, sal_neighbors=0, blur_kernel=(31,31), scale_factor=1.1, stage_start=1):
+
+        self.face_neighbors = face_neighbors
+        self.sal_neighbors = sal_neighbors
+        self.scale_factor = scale_factor
+        self.blur_kernel = blur_kernel
+        self.min_size = (min_size, min_size)
+        self.max_size = (max_size, max_size)
+        self.stage_start = stage_start
+
+        cdir, cname = os.path.split(cascade)
+        cname, ext = os.path.splitext(cname)
+
+        tree = xml.etree.ElementTree.parse(cascade)
+        #width = int(tree.find('./cascade/width').text.strip())
+        #height = int(tree.find('./cascade/height').text.strip())
+        self.nstages = int(tree.find('./cascade/stageNum').text.strip())
+
+        # initialize one classifier per stage file: <cdir>/<cname>/<stage>.xml
+        cascades = [join(cdir, cname, str(c)+'.xml') for c in range(stage_start, self.nstages+1)]
+        self.classifiers = [cv2.CascadeClassifier(c) for c in cascades]
+
+    def get_saliency(self, src):
+
+        # convert to grayscale
+        src_gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
+
+        # run face detector on all stage-classifiers
+        self.face_matrix = [self.detect_faces(src_gray, c) for c in self.classifiers]
+
+        # create saliency map in grayscale
+        w, h = src.shape[1], src.shape[0]
+        saliency = np.zeros((h, w), dtype=np.float32)
+
+        # draw each face ROI as a stage-weighted grayscale increment;
+        # face_neighbors sets how many overlapping detections reach white
+        for i, face_list in enumerate(self.face_matrix, 1):
+            inc = round(255./float(self.face_neighbors)/float(self.nstages-self.stage_start)) * i
+            if face_list is not None:
+                for x1, y1, fw, fh in face_list:
+                    saliency[y1:y1+fh, x1:x1+fw] += inc
+
+        # normalize, clip, and recast as uint8
+        smax = saliency.max()
+        if smax > 255:
+            saliency /= (smax/255.)
+        saliency = np.clip(saliency, 0, 255)
+        saliency = np.array(saliency, dtype=np.uint8)
+
+        # blur and apply colormap
+        saliency = cv2.GaussianBlur(saliency, self.blur_kernel, 0)
+        dst = cv2.applyColorMap(saliency, cv2.COLORMAP_JET)
+        return dst
+
+    def detect_faces(self, src, classifier):
+
+        matches = classifier.detectMultiScale(src, self.scale_factor,
+                                              self.sal_neighbors, self.flags, self.min_size, self.max_size)
+
+        if matches is None or len(matches) < 1:
+            return None
+        else:
+            return sorted(map(tuple, matches), key=lambda r: r[2]*r[3], reverse=True)  # lg --> sm
\ No newline at end of file
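
For context, a minimal usage sketch follows (not part of the commit above). The paths and file names are assumptions: HaarSaliency reads stageNum from the full cascade XML and then expects a sibling directory named after the cascade that holds one truncated cascade per stage (1.xml ... stageNum.xml), which has to be generated separately; the module is assumed to be importable from the working directory.

import cv2
from HaarSaliency import HaarSaliency

# hypothetical paths; the per-stage files 1.xml..N.xml are assumed to exist
# in cascades/haarcascade_frontalface_default/ next to the full cascade XML
cascade_path = 'cascades/haarcascade_frontalface_default.xml'

hs = HaarSaliency(cascade_path, min_size=60, max_size=400,
                  face_neighbors=3, sal_neighbors=0,
                  blur_kernel=(31, 31), scale_factor=1.1, stage_start=1)

img = cv2.imread('portrait.jpg')      # BGR input, ideally 320x240 to 640x480
heatmap = hs.get_saliency(img)        # uint8 BGR, JET-colormapped saliency map

# composite the heatmap over the source image for inspection
overlay = cv2.addWeighted(img, 0.5, heatmap, 0.5, 0)
cv2.imwrite('portrait_saliency.jpg', overlay)

Because sal_neighbors defaults to 0, detectMultiScale keeps every raw detection window at each stage, so overlapping windows accumulate into the heatmap. get_saliency returns only the colormapped map; compositing it over the source image is left to the caller, as sketched above.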