-rw-r--r--  megapixels/app/models/bbox.py  17
-rw-r--r--  megapixels/app/models/dataset.py  25
-rw-r--r--  megapixels/app/processors/face_age_gender.py  20
-rw-r--r--  megapixels/app/processors/face_beauty.py  15
-rw-r--r--  megapixels/app/processors/face_detector.py  51
-rw-r--r--  megapixels/app/processors/face_extractor.py  42
-rw-r--r--  megapixels/app/processors/face_landmarks.py  31
-rw-r--r--  megapixels/app/processors/face_pose.py  23
-rw-r--r--  megapixels/app/processors/face_recognition.py  68
-rw-r--r--  megapixels/app/settings/app_cfg.py  7
-rw-r--r--  megapixels/app/settings/types.py  6
-rw-r--r--  megapixels/app/utils/display_utils.py  9
-rw-r--r--  megapixels/app/utils/draw_utils.py  56
-rw-r--r--  megapixels/commands/cv/face_attributes.py  139
-rw-r--r--  megapixels/commands/cv/face_landmark_2d_68.py  4
-rw-r--r--  megapixels/commands/cv/face_pose.py  11
-rw-r--r--  megapixels/commands/cv/face_roi.py  14
-rw-r--r--  megapixels/commands/cv/face_vector.py  13
-rw-r--r--  megapixels/commands/datasets/file_record.py  40
-rw-r--r--  megapixels/commands/demo/face_3ddfa.py  85
-rw-r--r--  megapixels/commands/demo/face_age_gender.py  31
-rw-r--r--  megapixels/commands/demo/face_beauty.py  12
-rw-r--r--  megapixels/commands/demo/face_detect.py (renamed from megapixels/commands/demo/face_detection.py)  57
-rw-r--r--  megapixels/commands/demo/face_landmarks_2d.py  155
-rw-r--r--  megapixels/commands/demo/face_landmarks_3d.py  82
-rw-r--r--  megapixels/commands/demo/face_pose.py  25
-rw-r--r--  megapixels/commands/demo/face_search.py  40
-rw-r--r--  megapixels/commands/demo/face_vector.py  28
28 files changed, 563 insertions(+), 543 deletions(-)
diff --git a/megapixels/app/models/bbox.py b/megapixels/app/models/bbox.py
index f1216698..f65f7373 100644
--- a/megapixels/app/models/bbox.py
+++ b/megapixels/app/models/bbox.py
@@ -1,4 +1,5 @@
import math
+import random
from dlib import rectangle as dlib_rectangle
import numpy as np
@@ -127,9 +128,23 @@ class BBox:
d = int(math.sqrt(math.pow(dcx, 2) + math.pow(dcy, 2)))
return d
+
# -----------------------------------------------------------------
# Modify
+ def jitter(self, amt):
+ '''Randomly jitters the BBox x, y, w, h values. Used for face feature extraction
+ :param amt: (float) percentage of BBox for maximum translation
+ :returns (BBox)
+ '''
+ w = self._width + (self._width * random.uniform(-amt, amt))
+ h = self._height + (self._height * random.uniform(-amt, amt))
+ cx = self._cx + (self._cx * random.uniform(-amt, amt))
+ cy = self._cy + (self._cy * random.uniform(-amt, amt))
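+ # clamp the jittered corners to the normalized [0, 1] range so the box stays inside the image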
+ x1, y1 = np.clip((cx - w/2, cy - h/2), 0.0, 1.0)
+ x2, y2 = np.clip((cx + w/2, cy + h/2), 0.0, 1.0)
+ return BBox(x1, y1, x2, y2)
+
def expand(self, per):
"""Expands BBox by percentage
:param per: (float) percentage to expand 0.0 - 1.0
@@ -186,7 +201,7 @@ class BBox:
# print(adj)
r = np.add(np.array(r), adj)
- return BBox(*r)
+ return BBox(*r) # updates all BBox values
# -----------------------------------------------------------------
diff --git a/megapixels/app/models/dataset.py b/megapixels/app/models/dataset.py
index eb0109a7..bbef9ff5 100644
--- a/megapixels/app/models/dataset.py
+++ b/megapixels/app/models/dataset.py
@@ -44,6 +44,9 @@ class Dataset:
self.log.info(f'build face vector dict: {len(self._face_vectors)}')
# remove the face vector column, it can be several GB of memory
self._metadata[metadata_type].drop('vec', axis=1, inplace=True)
+ #n_dims = len(self._metadata[metadata_type].keys()) - 2
+ #drop_keys = [f'd{i}' for i in range(1,n_dims+1)]
+ #self._metadata[metadata_type].drop(drop_keys, axis=1, inplace=True)
else:
self.log.error(f'File not found: {fp_csv}. Exiting.')
sys.exit()
@@ -53,7 +56,7 @@ class Dataset:
fp_csv = self.data_store.metadata(metadata_type)
self.log.info(f'loading: {fp_csv}')
if Path(fp_csv).is_file():
- self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
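+ # read 'fn' as string so numeric-looking filenames keep their leading zeros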
+ self._metadata[metadata_type] = pd.read_csv(fp_csv, dtype={'fn':str}).set_index('index')
else:
self.log.error(f'File not found: {fp_csv}. Exiting.')
sys.exit()
@@ -142,33 +145,37 @@ class Dataset:
# find most similar feature vectors indexes
#match_idxs = self.similar(query_vec, n_results, threshold)
sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
- match_idxs = np.argpartition(sim_scores, n_results)[:n_results]
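+ # passing range(n_results) makes argpartition return the n_results smallest distances in ascending order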
+ match_idxs = np.argpartition(sim_scores, range(n_results))[:n_results]
+ df_vector = self._metadata[types.Metadata.FACE_VECTOR]
+ df_record = self._metadata[types.Metadata.FILE_RECORD]
+
for match_idx in match_idxs:
# get the corresponding face vector row
roi_index = self._face_vector_roi_idxs[match_idx]
- df_record = self._metadata[types.Metadata.FILE_RECORD]
- ds_record = df_record.iloc[roi_index]
+ record_idx = df_vector.iloc[roi_index].record_index
+ ds_record = df_record.iloc[record_idx]
self.log.debug(f'find match index: {match_idx}, --> roi_index: {roi_index}')
fp_im = self.data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
s3_url = self.data_store_s3.face(ds_record.uuid)
image_record = ImageRecord(ds_record, fp_im, s3_url)
- #roi_index = self._face_vector_roi_idxs[match_idx]
- #image_record = self.roi_idx_to_record(roi_index)
image_records.append(image_record)
return image_records
# ----------------------------------------------------------------------
# utilities
- def df_vecs_to_dict(self, df):
+ def df_vecs_to_dict(self, df_vec):
# convert the DataFrame CSV to float list of vecs
- return [list(map(float,x.vec.split(','))) for x in df.itertuples()]
+ # n_dims = len(df_vec.keys()) - 2 # number of columns with 'd1, d2,...d256'
+ #return [[df[f'd{i}'] for i in range(1,n_dims+1)] for df_idx, df in df_vec.iterrows()]
+ # return [[df[f'd{i}'] for i in range(1,n_dims+1)] for df_idx, df in df_vec.iterrows()]
+ return [list(map(float, x.vec.split(','))) for x in df_vec.itertuples()]
def df_vec_roi_idxs_to_dict(self, df):
# convert the DataFrame CSV to float list of vecs
#return [x.roi_index for x in df.itertuples()]
- return [x.roi_index for x in df.itertuples()]
+ return [int(x.roi_index) for i,x in df.iterrows()]
def similar(self, query_vec, n_results):
'''Finds most similar N indices of query face vector
diff --git a/megapixels/app/processors/face_age_gender.py b/megapixels/app/processors/face_age_gender.py
index 95efa8fc..66c51fa8 100644
--- a/megapixels/app/processors/face_age_gender.py
+++ b/megapixels/app/processors/face_age_gender.py
@@ -32,19 +32,21 @@ class _FaceAgeGender:
'''
dnn_size = (224,224)
- dnn_mean = (104.0, 177.0, 123.0)
+ dnn_mean = (104.0, 177.0, 123.0) # ?
+ # authors used imagenet mean
+ #dnn_mean = [103.939, 116.779, 123.68]
ages = np.arange(0, 101).reshape(101, 1)
+ padding = 0.4
def __init__(self, fp_prototxt, fp_model):
self.log = logger_utils.Logger.getLogger()
self.net = cv.dnn.readNetFromCaffe(fp_prototxt, fp_model)
- def _preprocess(self, im, bbox_dim):
+ def _preprocess(self, im, bbox_norm):
# isolate face ROI, expand bbox by 40% according to authors
# https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/
dim = im.shape[:2][::-1]
- bbox_dim_exp = bbox_dim.expand_dim( int(0.4*bbox_dim.width), dim)
- roi = bbox_dim_exp.to_xyxy()
+ roi = bbox_norm.expand(self.padding).to_dim(dim).to_xyxy()
im_face_crop = im[roi[1]:roi[3], roi[0]:roi[2]] # isolate face roi
# resize for blob
@@ -52,6 +54,7 @@ class _FaceAgeGender:
blob = cv.dnn.blobFromImage(im_resized, 1.0, self.dnn_size, self.dnn_mean)
return blob
+
class FaceGender(_FaceAgeGender):
# use "apparent" age models
@@ -61,17 +64,18 @@ class FaceGender(_FaceAgeGender):
def __init__(self):
super().__init__(self.fp_prototxt, self.fp_model)
- def predict(self, im, bbox_dim):
+ def predict(self, im, bbox_norm):
'''Predicts gender from face crop
:param im: (numpy.ndarray) BGR image
:param bbox_dim: (BBox) dimensioned
:returns (dict) with scores for male and female
'''
- im_blob = self._preprocess(im, bbox_dim)
+ im_blob = self._preprocess(im, bbox_norm)
self.net.setInput(im_blob)
preds = self.net.forward()[0]
return {'f': preds[0], 'm': preds[1]}
+
class FaceAgeApparent(_FaceAgeGender):
# use "apparent" age models
@@ -81,13 +85,13 @@ class FaceAgeApparent(_FaceAgeGender):
def __init__(self):
super().__init__(self.fp_prototxt, self.fp_model)
- def predict(self, im, bbox_dim):
+ def predict(self, im, bbox_norm):
'''Predicts apparent age from face crop
:param im: (numpy.ndarray) BGR image
:param bbox_dim: (BBox) dimensioned
:returns (float) predicted age
'''
- im_blob = self._preprocess(im, bbox_dim)
+ im_blob = self._preprocess(im, bbox_norm)
self.net.setInput(im_blob)
preds = self.net.forward()[0]
age = preds.dot(self.ages).flatten()[0]
diff --git a/megapixels/app/processors/face_beauty.py b/megapixels/app/processors/face_beauty.py
index a01c6834..e2d54c98 100644
--- a/megapixels/app/processors/face_beauty.py
+++ b/megapixels/app/processors/face_beauty.py
@@ -1,3 +1,7 @@
+"""
+https://github.com/ustcqidi/BeautyPredict
+"""
+
import sys
import os
from os.path import join
@@ -45,18 +49,15 @@ class FaceBeauty:
self.model.load_weights(fp_model)
- def beauty(self, im, bbox_dim):
+ def beauty(self, im, bbox_norm):
'''Predicts facial "beauty" score based on SCUT-FBP attractiveness labels
:param im: (numpy.ndarray) BGR image
:param bbox_dim: (BBox) dimensioned BBox
:returns (float) 0.0-1.0 with 1 being most attractive
'''
-
- face = bbox_dim.to_xyxy()
- self.log.debug(f'face: {face}')
-
- cropped_im = im[face[1]:face[3], face[0]:face[2]]
-
+ dim = im.shape[:2][::-1]
+ roi = bbox_norm.to_dim(dim).to_xyxy()
+ cropped_im = im[roi[1]:roi[3], roi[0]:roi[2]]
im_resized = cv.resize(cropped_im, (224, 224)) # force size
im_norm = np.array([(im_resized - 127.5) / 127.5]) # subtract mean
diff --git a/megapixels/app/processors/face_detector.py b/megapixels/app/processors/face_detector.py
index 0e194f7d..fbf91071 100644
--- a/megapixels/app/processors/face_detector.py
+++ b/megapixels/app/processors/face_detector.py
@@ -14,8 +14,57 @@ from app.settings import app_cfg as cfg
from app.settings import types
-class DetectorMTCNN:
+class DetectorMTCNN_CVDNN:
+
+ # https://github.com/CongWeilin/mtcnn-caffe
+
+ def __init__(self):
+ pass
+
+
+class DetectorMTCNN_PT:
+
+ # https://github.com/TropComplique/mtcnn-pytorch/
+ # pip install mtcnn
+
+ dnn_size = (300, 300)
+
+ def __init__(self, size=(400,400), gpu=0):
+ self.log = logger_utils.Logger.getLogger()
+ device_cur = os.getenv('CUDA_VISIBLE_DEVICES', '')
+ self.log.info(f'Change CUDA_VISIBLE_DEVICES from "{device_cur}" to "{gpu}"')
+ os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
+ from mtcnn.mtcnn import MTCNN
+ self.detector = MTCNN()
+ os.environ['CUDA_VISIBLE_DEVICES'] = device_cur # reset
+
+ def detect(self, im, size=(400,400), conf_thresh=None, pyramids=None, largest=False, zone=None):
+ '''Detects face using MTCNN and returns (list) of BBox
+ :param im: (numpy.ndarray) image
+ :returns list of BBox
+ '''
+ bboxes = []
+ dnn_size = self.dnn_size if size is None else size
+
+ im = im_utils.resize(im, width=dnn_size[0], height=dnn_size[1])
+ dim = im.shape[:2][::-1]
+ dets = self.detector.detect_faces(im)
+ for det in dets:
+ rect = det['box']
+ #keypoints = det['keypoints'] # not using here. see 'face_landmarks.py'
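+ # build a BBox from the pixel rect and the resized image dimensions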
+ bbox = BBox.from_xywh_dim(*rect, dim)
+ bboxes.append(bbox)
+
+ if largest and len(bboxes) > 1:
+ # only keep largest
+ bboxes.sort(key=operator.attrgetter('area'), reverse=True)
+ bboxes = [bboxes[0]]
+
+ return bboxes
+
+class DetectorMTCNN_TF:
+ # using TF for inference can cause GPU issues with other frameworks
# https://github.com/ipazc/mtcnn
# pip install mtcnn
diff --git a/megapixels/app/processors/face_extractor.py b/megapixels/app/processors/face_extractor.py
index 2666e090..f618cd36 100644
--- a/megapixels/app/processors/face_extractor.py
+++ b/megapixels/app/processors/face_extractor.py
@@ -44,6 +44,9 @@ class Extractor:
vec_flat[f'd{idx}'] = val
return vec_flat
+ def to_str(self, vec):
+ return ','.join([str(x) for x in vec])
+
def unflatten_df(self, df):
# convert from
return [df[f'd{i}'] for i in range(1,257)]
@@ -64,25 +67,54 @@ class ExtractorVGG(Extractor):
self.dnn = cv.dnn.readNetFromCaffe(fp_prototxt, fp_model)
self.feat_layer = self.dnn.getLayerNames()[-2]
- def extract(self, im, bbox_norm, padding=0.3):
+ def extract_jitter(self, im, bbox_norm):
+ '''(experimental) Extracts feature vector for face crop
+ :param im:
+ :param bbox_norm: (BBox) normalized
+ Padding and number of jitters come from cfg defaults (DEFAULT_FACE_PADDING_VGG_FACE2, DEFAULT_NUM_JITTERS).
+ :returns (list) of (float)'''
+ dim = im.shape[:2][::-1]
+ num_jitters = cfg.DEFAULT_NUM_JITTERS
+ padding = cfg.DEFAULT_FACE_PADDING_VGG_FACE2
+ pad_adj = .00875 * padding # percentage of padding to vary
+ paddings = np.linspace(padding - pad_adj, padding + pad_adj, num=num_jitters)
+ jitter_amt = cfg.DEFAULT_JITTER_AMT
+ vecs = []
+ for i in range(num_jitters):
+ bbox_norm_jit = bbox_norm.jitter(jitter_amt) # jitters w, h, center
+ bbox_ext = bbox_norm_jit.expand(paddings[i])
+ #bbox_ext = bbox_norm.expand(paddings[i])
+ x1,y1,x2,y2 = bbox_ext.to_dim(dim).to_xyxy()
+ im_crop = im[y1:y2, x1:x2]
+ # According to VGG, model trained using Bilinear interpolation (INTER_LINEAR)
+ im_crop = cv.resize(im_crop, self.dnn_dim, interpolation=cv.INTER_LINEAR)
+ blob = cv.dnn.blobFromImage(im_crop, 1.0, self.dnn_dim, self.dnn_mean)
+ self.dnn.setInput(blob)
+ vec = np.array(self.dnn.forward(self.feat_layer)[0])
+ vec_norm = vec/np.linalg.norm(vec) # normalize
+ vecs.append(vec_norm)
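+ # average the per-crop embeddings; jittering the crop smooths sensitivity to the exact detector box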
+ vec_norm = np.mean(np.array(vecs), axis=0)
+ return vec_norm
+
+ def extract(self, im, bbox_norm):
'''Extracts feature vector for face crop
:param im:
:param bbox_norm: (BBox) normalized
:param padding: (float) percent to extend ROI
:param jitters: not used here
:returns (list) of (float)'''
-
+ padding = cfg.DEFAULT_FACE_PADDING_VGG_FACE2
bbox_ext = bbox_norm.expand(padding)
dim = im.shape[:2][::-1]
- bbox_ext_dim = bbox_ext.to_dim(dim)
- x1,y1,x2,y2 = bbox_ext_dim.to_xyxy()
+ x1,y1,x2,y2 = bbox_ext.to_dim(dim).to_xyxy()
im = im[y1:y2, x1:x2]
# According to VGG, model trained using Bilinear interpolation (INTER_LINEAR)
im = cv.resize(im, self.dnn_dim, interpolation=cv.INTER_LINEAR)
blob = cv.dnn.blobFromImage(im, 1.0, self.dnn_dim, self.dnn_mean)
self.dnn.setInput(blob)
vec = np.array(self.dnn.forward(self.feat_layer)[0])
- vec_norm = np.array(vec)/np.linalg.norm(vec) # normalize
+ vec_norm = vec/np.linalg.norm(vec) # normalize
return vec_norm
diff --git a/megapixels/app/processors/face_landmarks.py b/megapixels/app/processors/face_landmarks.py
index 171fc666..231e378f 100644
--- a/megapixels/app/processors/face_landmarks.py
+++ b/megapixels/app/processors/face_landmarks.py
@@ -30,6 +30,9 @@ class Landmarks2D:
self.log.warn('Define landmarks() function')
pass
+ def to_str(self, vec):
+ return ','.join([','.join(list(map(str,[x,y]))) for x,y in vec])
+
def flatten(self, points):
'''Converts list of point-tupes into a flattened list for CSV
:param points: (list) of x,y points
@@ -69,9 +72,9 @@ class FaceAlignment2D_68(Landmarks2D):
# predict landmarks
points = self.fa.get_landmarks(im) # returns array of arrays of 68 2D pts/face
# convert to data type
- points = [list(map(int, p)) for p in points[0]]
- return points
-
+ w,h = im.shape[:2][::-1]
+ points = [(x/w, y/h) for x,y in points[0]]
+ return points # normalized
class Dlib2D(Landmarks2D):
@@ -82,15 +85,16 @@ class Dlib2D(Landmarks2D):
self.predictor = dlib.shape_predictor(model)
self.log.info(f'loaded predictor model: {model}')
- def landmarks(self, im, bbox):
+ def landmarks(self, im, bbox_norm):
'''Generates 68-pt landmarks using dlib predictor
:param im: (numpy.ndarray) BGR image
:param bbox: (app.models.BBox) dimensioned
- :returns (list) of (int, int) for x,y values
+ :returns (list) of (float, float) for normalized x,y values
'''
- bbox = bbox.to_dlib()
+ dim = im.shape[:2][::-1]
+ roi_dlib = bbox_norm.to_dim(dim).to_dlib()
im_gray = cv.cvtColor(im, cv.COLOR_BGR2GRAY)
- points = [[p.x, p.y] for p in self.predictor(im_gray, bbox).parts()]
+ points = [[p.x/dim[0], p.y/dim[1]] for p in self.predictor(im_gray, roi_dlib).parts()]
return points
@@ -121,13 +125,13 @@ class MTCNN2D_5(Landmarks2D):
from mtcnn.mtcnn import MTCNN
self.detector = MTCNN()
- def landmarks(self, im, bbox):
+ def landmarks(self, im, bbox_norm):
'''Detects face using MTCNN and returns (list) of BBox
:param im: (numpy.ndarray) image
:returns list of BBox
'''
results = []
- dim_wh = im.shape[:2][::-1] # (w, h)
+ dim = im.shape[:2][::-1] # (w, h)
# run MTCNN to get bbox and landmarks
dets = self.detector.detect_faces(im)
@@ -138,7 +142,7 @@ class MTCNN2D_5(Landmarks2D):
#rect = det['box']
points = det['keypoints']
# convert to normalized for contain-comparison
- points_norm = [np.array(pt)/dim_wh for pname, pt in points.items()]
+ points_norm = [np.array(pt)/dim for pname, pt in points.items()]
contains = False not in [bbox.contains(pn) for pn in points_norm]
if contains:
results.append(points) # append original points
@@ -185,14 +189,17 @@ class FaceAlignment3D_68(Landmarks3D):
device = f'cuda:{gpu}' if gpu > -1 else 'cpu'
self.fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, device=device, flip_input=flip_input)
- def landmarks(self, im, rect):
+ def landmarks(self, im, bbox_norm):
'''Calculates the 3D facial landmarks
:param im: (numpy.ndarray) BGR image
- :param rect: (list) of face (x1, y1, x2, y2)
+ :param bbox_norm: (BBox) of face roi
:returns (list) of 68 (int) (tuples) as (x,y, z)
'''
# predict landmarks
+ dim = im.shape[:2][::-1]
+ rect = bbox_norm.to_dim(dim).to_xyxy()
points = self.fa.get_landmarks(im, [rect]) # returns array of arrays of 68 3D pts/face
# convert to data type
+ # TODO normalize this, but how to norm 3D?
points = [list(map(int, p)) for p in points[0]]
return points
\ No newline at end of file
diff --git a/megapixels/app/processors/face_pose.py b/megapixels/app/processors/face_pose.py
index 5ac510ec..49a39a53 100644
--- a/megapixels/app/processors/face_pose.py
+++ b/megapixels/app/processors/face_pose.py
@@ -21,10 +21,10 @@ class FacePoseDLIB:
pose_types = {'pitch': (0,0,255), 'roll': (255,0,0), 'yaw': (0,255,0)}
def __init__(self):
- pass
+ self.log = logger_utils.Logger.getLogger()
- def pose(self, landmarks, dim):
+ def pose(self, landmarks_norm, dim):
'''Returns face pose information
:param landmarks: (list) of 68 (int, int) xy tuples
:param dim: (tuple|list) of image (width, height)
@@ -55,9 +55,10 @@ class FacePoseDLIB:
# find 6 pose points
pose_points = []
for j, idx in enumerate(pose_points_idx):
- pt = landmarks[idx]
- pose_points.append((pt[0], pt[1]))
- pose_points = np.array(pose_points, dtype='double') # convert to double
+ x,y = landmarks_norm[idx]
+ pt = (int(x*dim[0]), int(y*dim[1]))
+ pose_points.append(pt)
+ pose_points = np.array(pose_points, dtype='double') # convert to double, real dimensions
# create camera matrix
focal_length = dim[0]
@@ -75,18 +76,16 @@ class FacePoseDLIB:
result = {}
# project points
- #if project_points:
pts_im, jac = cv.projectPoints(axis, rot_vec, tran_vec, cam_mat, dist_coeffs)
pts_model, jac2 = cv.projectPoints(model_points, rot_vec, tran_vec, cam_mat, dist_coeffs)
- #result['points_model'] = pts_model
- #result['points_image'] = pts_im
+
result['points'] = {
- 'pitch': pts_im[0],
- 'roll': pts_im[2],
- 'yaw': pts_im[1]
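+ # keep only the projected (x, y) endpoint of each axis, cast to plain ints for drawing and CSV output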
+ 'pitch': list(map(int,pts_im[0][0])),
+ 'roll': list(map(int,pts_im[2][0])),
+ 'yaw': list(map(int,pts_im[1][0]))
}
- result['point_nose'] = tuple(landmarks[pose_points_idx[0]])
+ result['point_nose'] = tuple(map(int,pose_points[0]))
rvec_matrix = cv.Rodrigues(rot_vec)[0]
# convert to degrees
diff --git a/megapixels/app/processors/face_recognition.py b/megapixels/app/processors/face_recognition.py
deleted file mode 100644
index 76f00aa1..00000000
--- a/megapixels/app/processors/face_recognition.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import os
-from os.path import join
-from pathlib import Path
-
-import cv2 as cv
-import numpy as np
-import dlib
-import imutils
-
-from app.utils import im_utils, logger_utils
-from app.models.bbox import BBox
-from app.settings import app_cfg as cfg
-from app.settings import types
-
-class RecognitionDLIB:
-
- # https://github.com/davisking/dlib/blob/master/python_examples/face_recognition.py
- # facerec.compute_face_descriptor(img, shape, 100, 0.25)
-
- def __init__(self, gpu=0):
- self.log = logger_utils.Logger.getLogger()
-
- if gpu > -1:
- cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', '')
- os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
-
- self.predictor = dlib.shape_predictor(cfg.DIR_MODELS_DLIB_5PT)
- self.facerec = dlib.face_recognition_model_v1(cfg.DIR_MODELS_DLIB_FACEREC_RESNET)
-
- if gpu > -1:
- os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices # reset GPU env
-
-
- def vec(self, im, bbox, width=100,
- jitters=cfg.DLIB_FACEREC_JITTERS, padding=cfg.DLIB_FACEREC_PADDING):
- '''Converts image and bbox into 128d vector
- :param im: (numpy.ndarray) BGR image
- :param bbox: (BBox)
- '''
- # scale the image so the face is always 100x100 pixels
-
- #self.log.debug('compute scale')
- scale = width / bbox.width
- #im = cv.resize(im, (scale, scale), cv.INTER_LANCZOS4)
- #self.log.debug('resize')
- cv.resize(im, None, fx=scale, fy=scale, interpolation=cv.INTER_LANCZOS4)
- #self.log.debug('to dlib')
- bbox_dlib = bbox.to_dlib()
- #self.log.debug('precitor')
- face_shape = self.predictor(im, bbox_dlib)
- # vec = self.facerec.compute_face_descriptor(im, face_shape, jitters, padding)
- #self.log.debug('vec')
- vec = self.facerec.compute_face_descriptor(im, face_shape, jitters)
- #vec = self.facerec.compute_face_descriptor(im, face_shape)
- return vec
-
- def flatten(self, vec):
- '''Converts 128D vector into a flattened list for CSV
- :param points: (list) a feature vector as list of floats
- :returns dict item for each point (eg {'d1':0.28442156, 'd1': 0.1868632})
- '''
- vec_flat = {}
- for idx, val in enumerate(vec, 1):
- vec_flat[f'd{idx}'] = val
- return vec_flat
-
- def similarity(self, query_enc, known_enc):
- return np.linalg.norm(query_enc - known_enc, axis=1)
diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py
index 14e2493c..42e37b7a 100644
--- a/megapixels/app/settings/app_cfg.py
+++ b/megapixels/app/settings/app_cfg.py
@@ -42,7 +42,7 @@ DIR_PEOPLE = 'people'
DIR_MODELS_CAFFE = join(DIR_MODELS,'caffe')
DIR_MODELS_DARKNET = join(DIR_MODELS,'darknet')
DIR_MODELS_DARKNET_PJREDDIE = join(DIR_MODELS_DARKNET, 'pjreddie')
-DIR_MODELS_PYTORCHq = join(DIR_MODELS,'pytorch')
+DIR_MODELS_PYTORCH = join(DIR_MODELS,'pytorch')
DIR_MODELS_TORCH = join(DIR_MODELS,'torch')
DIR_MODELS_MXNET = join(DIR_MODELS,'mxnet')
DIR_MODELS_KERAS = join(DIR_MODELS,'keras')
@@ -99,7 +99,10 @@ HASH_BRANCH_SIZE = 3
DLIB_FACEREC_JITTERS = 5 # number of face recognition jitters
#DLIB_FACEREC_PADDING = 0.25 # default dlib
FACEREC_PADDING = 0.3 # VGG FACE2 recommended
-
+DEFAULT_SIZE_FACE_DETECT = (480,480)
+DEFAULT_JITTER_AMT = 0.015 # used for OpenCV DNN face detector with VGG2 face feature extractor
+DEFAULT_NUM_JITTERS = 4 # used for smoothing the facial feature extraction
+DEFAULT_FACE_PADDING_VGG_FACE2 = 0.3
POSE_MINMAX_YAW = (-25,25)
POSE_MINMAX_ROLL = (-15,15)
POSE_MINMAX_PITCH = (-10,10)
diff --git a/megapixels/app/settings/types.py b/megapixels/app/settings/types.py
index 940c8b6d..9325fc3c 100644
--- a/megapixels/app/settings/types.py
+++ b/megapixels/app/settings/types.py
@@ -43,10 +43,10 @@ class LogLevel(Enum):
class Metadata(Enum):
IDENTITY, FILE_RECORD, FACE_VECTOR, FACE_POSE, \
- FACE_ROI, FACE_LANDMARK_2D_68, FACE_LANDMARK_2D_5,FACE_LANDMARK_3D_68 = range(8)
+ FACE_ROI, FACE_LANDMARK_2D_68, FACE_LANDMARK_2D_5,FACE_LANDMARK_3D_68, FACE_ATTRIBUTES = range(9)
class Dataset(Enum):
- LFW, VGG_FACE2, MSCELEB, UCCS, UMD_FACES, SCUT_FBP, SELFIE_DATASET = range(7)
+ LFW, VGG_FACE2, MSCELEB, UCCS, UMD_FACES, SCUT_FBP, UCF_SELFIE, UTK = range(8)
# ---------------------------------------------------------------------
@@ -59,7 +59,7 @@ class FaceDetectNet(Enum):
class FaceExtractor(Enum):
"""Type of face recognition feature extractor"""
# TODO deprecate DLIB resnet and use only CVDNN Caffe models
- DLIB_RESNET, VGG_FACE2 = range(2)
+ DLIB, VGG = range(2)
class FaceLandmark2D_5(Enum):
DLIB, MTCNN = range(2)
diff --git a/megapixels/app/utils/display_utils.py b/megapixels/app/utils/display_utils.py
index 7b74aa46..e72cc0f0 100644
--- a/megapixels/app/utils/display_utils.py
+++ b/megapixels/app/utils/display_utils.py
@@ -2,6 +2,10 @@ import sys
import cv2 as cv
+from app.utils.logger_utils import Logger
+
+
+log = Logger.getLogger()
def handle_keyboard(delay_amt=1):
'''Used with cv.imshow('title', image) to wait for keyboard press
@@ -11,6 +15,5 @@ def handle_keyboard(delay_amt=1):
if k == 27 or k == ord('q'): # ESC
cv.destroyAllWindows()
sys.exit()
- elif k != 255:
- # any key to continue
- break
\ No newline at end of file
+ #else:
+ #log.info('Press Q, q, or ESC to exit')
diff --git a/megapixels/app/utils/draw_utils.py b/megapixels/app/utils/draw_utils.py
index 3a389e68..3378e3e8 100644
--- a/megapixels/app/utils/draw_utils.py
+++ b/megapixels/app/utils/draw_utils.py
@@ -4,6 +4,9 @@ from math import sqrt
import numpy as np
import cv2 as cv
+from app.utils import logger_utils
+
+log = logger_utils.Logger.getLogger()
end_list = np.array([17, 22, 27, 42, 48, 31, 36, 68], dtype=np.int32) - 1
@@ -105,46 +108,61 @@ def plot_pose_box(im, Ps, pts68s, color=(40, 255, 0), line_width=2):
pose_types = {'pitch': (0,0,255), 'roll': (255,0,0), 'yaw': (0,255,0)}
-def draw_landmarks2D(im, points, radius=3, color=(0,255,0), stroke_weight=2):
+def draw_landmarks2D(im, points_norm, radius=3, color=(0,255,0)):
'''Draws facial landmarks, either 5pt or 68pt
'''
- for x,y in points:
- cv.circle(im, (x,y), radius, color, -1, cv.LINE_AA)
-
+ im_dst = im.copy()
+ dim = im.shape[:2][::-1]
+ for x,y in points_norm:
+ pt = (int(x*dim[0]), int(y*dim[1]))
+ cv.circle(im_dst, pt, radius, color, -1, cv.LINE_AA)
+ return im_dst
-def draw_landmarks3D(im, points, radius=3, color=(0,255,0), stroke_weight=2):
+def draw_landmarks3D(im, points, radius=3, color=(0,255,0)):
'''Draws 3D facial landmarks
'''
+ im_dst = im.copy()
for x,y,z in points:
- cv.circle(im, (x,y), radius, color, -1, cv.LINE_AA)
-
+ cv.circle(im_dst, (x,y), radius, color, -1, cv.LINE_AA)
+ return im_dst
-def draw_bbox(im, bbox, color=(0,255,0), stroke_weight=2):
- '''Draws a dimensioned (not-normalized) BBox onto cv image
+def draw_bbox(im, bbox_norm, color=(0,255,0), stroke_weight=2):
+ '''Draws BBox onto cv image
'''
- cv.rectangle(im, bbox.pt_tl, bbox.pt_br, color, stroke_weight)
-
+ im_dst = im.copy()
+ bbox_dim = bbox_norm.to_dim(im.shape[:2][::-1])
+ cv.rectangle(im_dst, bbox_dim.pt_tl, bbox_dim.pt_br, color, stroke_weight)
+ return im_dst
def draw_pose(im, pt_nose, image_pts):
'''Draws 3-axis pose over image
+ TODO: normalize point data
'''
- cv.line(im, pt_nose, tuple(image_pts['pitch'].ravel()), pose_types['pitch'], 3)
- cv.line(im, pt_nose, tuple(image_pts['yaw'].ravel()), pose_types['yaw'], 3)
- cv.line(im, pt_nose, tuple(image_pts['roll'].ravel()), pose_types['roll'], 3)
-
+ im_dst = im.copy()
+ log.debug(f'pt_nose: {pt_nose}')
+ log.debug(f'image_pts pitch: {image_pts["pitch"]}')
+ cv.line(im_dst, pt_nose, tuple(image_pts['pitch']), pose_types['pitch'], 3)
+ cv.line(im_dst, pt_nose, tuple(image_pts['yaw']), pose_types['yaw'], 3)
+ cv.line(im_dst, pt_nose, tuple(image_pts['roll']), pose_types['roll'], 3)
+ return im_dst
-def draw_text(im, pt, text, color=(0,255,0)):
+def draw_text(im, pt_norm, text, color=(0,255,0)):
'''Draws degrees as text over image
'''
- cv.putText(im, text, pt, cv.FONT_HERSHEY_SIMPLEX, 0.75, color, thickness=1, lineType=cv.LINE_AA)
-
+ im_dst = im.copy()
+ dim = im.shape[:2][::-1]
+ pt = tuple(map(int, (pt_norm[0]*dim[0], pt_norm[1]*dim[1])))
+ cv.putText(im_dst, text, pt, cv.FONT_HERSHEY_SIMPLEX, 0.75, color, thickness=1, lineType=cv.LINE_AA)
+ return im_dst
def draw_degrees(im, pose_data, color=(0,255,0)):
'''Draws degrees as text over image
'''
+ im_dst = im.copy()
for i, pose_type in enumerate(pose_types.items()):
k, clr = pose_type
v = pose_data[k]
t = '{}: {:.2f}'.format(k, v)
origin = (10, 30 + (25 * i))
- cv.putText(im, t, origin, cv.FONT_HERSHEY_SIMPLEX, 0.5, clr, thickness=2, lineType=2)
+ cv.putText(im_dst, t, origin, cv.FONT_HERSHEY_SIMPLEX, 0.5, clr, thickness=2, lineType=2)
+ return im_dst
\ No newline at end of file
diff --git a/megapixels/commands/cv/face_attributes.py b/megapixels/commands/cv/face_attributes.py
new file mode 100644
index 00000000..bb7978f7
--- /dev/null
+++ b/megapixels/commands/cv/face_attributes.py
@@ -0,0 +1,139 @@
+"""
+
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=cfg.DEFAULT_SIZE_FACE_DETECT,
+ help='Processing size for detection')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('-d', '--display', 'opt_display', is_flag=True,
+ help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
+ opt_size, opt_slice, opt_force, opt_display):
+ """Creates 2D 68-point landmarks"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors import face_age_gender
+ from app.models.data_store import DataStore
+ from app.models.bbox import BBox
+
+ # -------------------------------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+ # init face processors
+ age_estimator_apnt = face_age_gender.FaceAgeApparent()
+ age_estimator_real = face_age_gender.FaceAgeReal()
+ gender_estimator = face_age_gender.FaceGender()
+
+ # init filepaths
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # set file output path
+ metadata_type = types.Metadata.FACE_ATTRIBUTES
+ fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # -------------------------------------------------------------------------
+ # load filepath data
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record, dtype={'fn':str}).set_index('index')
+ # load ROI data
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # slice if you want
+ if opt_slice:
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+ # group by image index (speedup if multiple faces per image)
+ df_img_groups = df_roi.groupby('record_index')
+ log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+ # store landmarks in list
+ results = []
+
+ # -------------------------------------------------------------------------
+ # iterate groups with file/record index as key
+
+ for record_index, df_img_group in tqdm(df_img_groups):
+
+ # access file_record DataSeries
+ file_record = df_record.iloc[record_index]
+
+ # load image
+ fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext)
+ im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ dim = im_resized.shape[:2][::-1]
+
+ # iterate ROIs in this image
+ for roi_index, df_img in df_img_group.iterrows():
+
+ # build normalized BBox from the stored ROI
+ bbox_norm = BBox.from_xywh(df_img.x, df_img.y, df_img.w, df_img.h)
+ bbox_dim = bbox_norm.to_dim(dim)
+
+ #age_apnt = age_estimator_apnt.predict(im_resized, bbox_norm)
+ #age_real = age_estimator_real.predict(im_resized, bbox_norm)
+ #gender = gender_estimator.predict(im_resized, bbox_norm)
+
+ # attr_obj = {
+ # 'age_real':float(f'{age_real:.2f}'),
+ # 'age_apparent': float(f'{age_apnt:.2f}'),
+ # 'm': float(f'{gender["m"]:.4f}'),
+ # 'f': float(f'{gender["f"]:.4f}'),
+ # 'roi_index': roi_index
+ # }
+ attr_obj = {
+ 'roi_index': roi_index
+ }
+ results.append(attr_obj)
+
+
+ # create DataFrame and save to CSV
+ file_utils.mkdirs(fp_out)
+ df = pd.DataFrame.from_dict(results)
+ df.index.name = 'index'
+ df.to_csv(fp_out)
+
+ # save script
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out))
\ No newline at end of file
diff --git a/megapixels/commands/cv/face_landmark_2d_68.py b/megapixels/commands/cv/face_landmark_2d_68.py
index e24d4b60..c6978a40 100644
--- a/megapixels/commands/cv/face_landmark_2d_68.py
+++ b/megapixels/commands/cv/face_landmark_2d_68.py
@@ -126,7 +126,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
points = landmark_detector.landmarks(im_resized, bbox)
points_norm = landmark_detector.normalize(points, dim)
- points_flat = landmark_detector.flatten(points_norm)
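+ # serialize normalized landmarks as one comma-separated string instead of one column per value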
+ points_str = landmark_detector.to_str(points_norm)
# display if optioned
if opt_display:
@@ -137,7 +137,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
display_utils.handle_keyboard()
# add to results for CSV
- results.append(points_flat)
+ results.append({'vec': points_str, 'roi_index':roi_index})
# create DataFrame and save to CSV
diff --git a/megapixels/commands/cv/face_pose.py b/megapixels/commands/cv/face_pose.py
index 70ea1f30..75db603b 100644
--- a/megapixels/commands/cv/face_pose.py
+++ b/megapixels/commands/cv/face_pose.py
@@ -92,7 +92,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
# load data
fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
- df_record = pd.read_csv(fp_record).set_index('index')
+ df_record = pd.read_csv(fp_record, dtype={'fn':str}).set_index('index')
# load ROI data
fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
df_roi = pd.read_csv(fp_roi).set_index('index')
@@ -125,10 +125,11 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
#dim = (file_record.width, file_record.height)
dim = im_resized.shape[:2][::-1]
- bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+ bbox_norm = BBox.from_xywh(x, y, w, h)
+ bbox_dim = bbox_norm.to_dim(dim)
# get pose
- landmarks = face_landmarks.landmarks(im_resized, bbox)
+ landmarks = face_landmarks.landmarks(im_resized, bbox_norm)
pose_data = face_pose.pose(landmarks, dim)
#pose_degrees = pose_data['degrees'] # only keep the degrees data
#pose_degrees['points_nose'] = pose_data
@@ -143,8 +144,8 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
# add image index and append to result CSV data
pose_data['roi_index'] = roi_index
for k, v in pose_data['points'].items():
- pose_data[f'point_{k}_x'] = v[0][0] / dim[0]
- pose_data[f'point_{k}_y'] = v[0][1] / dim[1]
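+ # pose points are now plain (x, y) int tuples, so index directly before normalizing by image size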
+ pose_data[f'point_{k}_x'] = v[0] / dim[0]
+ pose_data[f'point_{k}_y'] = v[1] / dim[1]
# rearrange data structure for DataFrame
pose_data.pop('points')
diff --git a/megapixels/commands/cv/face_roi.py b/megapixels/commands/cv/face_roi.py
index 70fff401..950936cf 100644
--- a/megapixels/commands/cv/face_roi.py
+++ b/megapixels/commands/cv/face_roi.py
@@ -33,7 +33,7 @@ color_filters = {'color': 1, 'gray': 2, 'all': 3}
help='Output image size')
@click.option('-d', '--detector', 'opt_detector_type',
type=cfg.FaceDetectNetVar,
- default=click_utils.get_default(types.FaceDetectNet.DLIB_CNN),
+ default=click_utils.get_default(types.FaceDetectNet.CVDNN),
help=click_utils.show_help(types.FaceDetectNet))
@click.option('-g', '--gpu', 'opt_gpu', default=0,
help='GPU index')
@@ -97,8 +97,8 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu)
elif opt_detector_type == types.FaceDetectNet.DLIB_HOG:
detector = face_detector.DetectorDLIBHOG()
- elif opt_detector_type == types.FaceDetectNet.MTCNN:
- detector = face_detector.DetectorMTCNN(gpu=opt_gpu)
+ elif opt_detector_type == types.FaceDetectNet.MTCNN_TF:
+ detector = face_detector.DetectorMTCNN_TF(gpu=opt_gpu)
elif opt_detector_type == types.FaceDetectNet.HAAR:
log.error('{} not yet implemented'.format(opt_detector_type.name))
return
@@ -106,7 +106,7 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
# get list of files to process
fp_in = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
- df_records = pd.read_csv(fp_in).set_index('index')
+ df_records = pd.read_csv(fp_in, dtype={'fn':str}).set_index('index')
if opt_slice:
df_records = df_records[opt_slice[0]:opt_slice[1]]
log.debug('processing {:,} files'.format(len(df_records)))
@@ -144,9 +144,9 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
for bbox in bboxes:
roi = {
'record_index': int(df_record.Index),
- 'x': bbox.x,
- 'y': bbox.y,
- 'w': bbox.w,
+ 'x': bbox.x,
+ 'y': bbox.y,
+ 'w': bbox.w,
'h': bbox.h
}
data.append(roi)
diff --git a/megapixels/commands/cv/face_vector.py b/megapixels/commands/cv/face_vector.py
index 9e9f6396..9a527bc3 100644
--- a/megapixels/commands/cv/face_vector.py
+++ b/megapixels/commands/cv/face_vector.py
@@ -27,10 +27,10 @@ from app.settings import app_cfg as cfg
show_default=True,
help=click_utils.show_help(types.Dataset))
@click.option('--size', 'opt_size',
- type=(int, int), default=(300, 300),
+ type=(int, int), default=cfg.DEFAULT_SIZE_FACE_DETECT,
help='Output image size')
@click.option('-e', '--extractor', 'opt_extractor',
- default=types.FaceExtractor.VGG,
+ default=click_utils.get_default(types.FaceExtractor.VGG),
type=cfg.FaceExtractorVar,
help='Type of extractor framework/network to use')
@click.option('-j', '--jitters', 'opt_jitters', default=cfg.DLIB_FACEREC_JITTERS,
@@ -88,7 +88,7 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
# load data
fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
- df_record = pd.read_csv(fp_record).set_index('index')
+ df_record = pd.read_csv(fp_record, dtype={'fn':str}).set_index('index')
fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
df_roi = pd.read_csv(fp_roi).set_index('index')
@@ -115,10 +115,9 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
bbox = BBox.from_xywh(x, y, w, h) # norm
# compute vec
vec = extractor.extract(im, bbox) # use normalized BBox
- vec_flat = extractor.flatten(vec)
- vec_flat['roi_index'] = roi_index
- vec_flat['record_index'] = record_index
- vecs.append(vec_flat)
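+ # store the embedding as a single comma-separated string column plus its ROI and record indices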
+ vec_str = extractor.to_str(vec)
+ vec_obj = {'vec':vec_str, 'roi_index': roi_index, 'record_index':record_index}
+ vecs.append(vec_obj)
# -------------------------------------------------
# save data
diff --git a/megapixels/commands/datasets/file_record.py b/megapixels/commands/datasets/file_record.py
index d3f790d4..b5daef4e 100644
--- a/megapixels/commands/datasets/file_record.py
+++ b/megapixels/commands/datasets/file_record.py
@@ -45,9 +45,11 @@ identity_sources = ['subdir', 'numeric']
help='Identity source key')
@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
help='Use glob recursion (slower)')
+@click.option('--max-depth', 'opt_max_depth', default=None, type=int,
+ help='Max number of images per subdirectory')
@click.pass_context
def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media, opt_slice, opt_threads,
- opt_identity, opt_force, opt_recursive):
+ opt_identity, opt_force, opt_recursive, opt_max_depth):
"""Generates sha256, uuid, and identity index CSV file"""
import sys, os
@@ -59,6 +61,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
import random
import uuid
+ from PIL import Image
import cv2 as cv
import pandas as pd
from tqdm import tqdm
@@ -84,6 +87,26 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
fp_in = opt_fp_in if opt_fp_in is not None else data_store.media_images_original()
log.info(f'Globbing {fp_in}')
fp_ims = file_utils.glob_multi(fp_in, ['jpg', 'png'], recursive=opt_recursive)
+
+ log.info('Found {:,} images'.format(len(fp_ims)))
+ subdir_groups = {}
+ if opt_max_depth:
+ log.debug(f'using max depth: {opt_max_depth}')
+ for fp_im in fp_ims:
+ fpp_im = Path(fp_im)
+
+ subdir = fp_im.split('/')[-2]
+ if subdir not in subdir_groups:
+ subdir_groups[subdir] = []
+ subdir_groups[subdir].append(fp_im)
+ # for each subgroup, limit number of files
+ fp_ims = []
+ for subdir_name, items in subdir_groups.items():
+ ims = items[0:opt_max_depth]
+ fp_ims += ims
+
+ log.debug(f'num subdirs: {len(subdir_groups.keys())}')
# fail if none
if not fp_ims:
log.error('No images. Try with "--recursive"')
@@ -93,7 +116,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
log.info('Found {:,} images'.format(len(fp_ims)))
-
# ----------------------------------------------------------------
# multithread process into SHA256
@@ -101,7 +123,14 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
def pool_mapper(fp_im):
pbar.update(1)
- sha256 = file_utils.sha256(fp_im)
+ try:
+ sha256 = file_utils.sha256(fp_im)
+ im = Image.open(fp_im)
+ im.verify() # throws error if bad file
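+ # skip images smaller than 100px on either side; too small for reliable face processing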
+ assert(im.size[0] > 100 and im.size[1] > 100)
+ except Exception as e:
+ log.warn(f'skipping file: {fp_im}')
+ return None
im = cv.imread(fp_im)
w, h = im.shape[:2][::-1]
file_size_kb = os.stat(fp_im).st_size // 1000
@@ -128,10 +157,11 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
data = []
indentity_count = 0
for pool_map, fp_im in zip(pool_maps, fp_ims):
+ if pool_map is None:
+ log.warn(f'skipping file: {fp_im}')
+ continue # skip error files
fpp_im = Path(fp_im)
subdir = str(fpp_im.parent.relative_to(fp_in))
- #subdir = '' if subdir is '.' else subdir
- log.debug(subdir)
if opt_identity:
subdirs = subdir.split('/')
diff --git a/megapixels/commands/demo/face_3ddfa.py b/megapixels/commands/demo/face_3ddfa.py
index 6182aeb6..90359159 100644
--- a/megapixels/commands/demo/face_3ddfa.py
+++ b/megapixels/commands/demo/face_3ddfa.py
@@ -1,7 +1,7 @@
'''
Combines 3D face mode + rendering
-https://github.com/cleardusk/3DDFA
-https://github.com/YadiraF/face3d
+https://github.com/cleardusk/3DDFA --> 3d landmarks
+https://github.com/YadiraF/face3d --> render 3D with lighting as 2.5d image
'''
import click
@@ -13,8 +13,8 @@ from app.settings import app_cfg as cfg
@click.command()
@click.option('-i', '--input', 'opt_fp_in', default=None, required=True,
help='Image filepath')
-@click.option('-o', '--output', 'opt_fp_out', default=None,
- help='GIF output path')
+@click.option('-o', '--output', 'opt_dir_out', default=None,
+ help='Directory for output files')
@click.option('--size', 'opt_size',
type=(int, int), default=(300, 300),
help='Output image size')
@@ -27,11 +27,13 @@ from app.settings import app_cfg as cfg
@click.option('--size', 'opt_render_dim',
type=(int, int), default=(512, 512),
help='2.5D render image size')
-@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=True,
help='Display detections to debug')
+@click.option('--save/--no-save', 'opt_save', is_flag=True, default=True,
+ help='Save output images/files')
@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
- opt_size, opt_render_dim, opt_force, opt_display):
+def cli(ctx, opt_fp_in, opt_dir_out, opt_gpu, opt_bbox_init,
+ opt_size, opt_render_dim, opt_force, opt_display, opt_save):
"""3D face demo"""
import sys
@@ -58,6 +60,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
import scipy.io as sio
sys.path.append(join(Path.cwd().parent, '3rdparty'))
+ # git clone https://github.com/cleardusk/3DDFA 3rdparty/d3ddfa
# change name of 3DDFA to d3DDFA because can't start with number
from d3DDFA import mobilenet_v1
from d3DDFA.utils.ddfa import ToTensorGjz, NormalizeGjz, str2bool
@@ -70,7 +73,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
from d3DDFA.utils.render import get_depths_image, cget_depths_image, cpncc
from d3DDFA.utils import paf as d3dfa_paf_utils
- # https://github.com/YadiraF/face3d
+ # git clone https://github.com/YadiraF/face3d 3rdparty/face3d
# compile cython module in face3d/mesh/cython/ python setup.py build_ext -i
from face3d.face3d import mesh as face3d_mesh
@@ -82,13 +85,11 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
fpp_in = Path(opt_fp_in)
im = cv.imread(opt_fp_in)
- #im = im_utils.resize(im_orig, width=opt_size[0], height=opt_size[1])
- # im = im_orig.copy()
# ----------------------------------------------------------------------------
# detect face
- face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ face_detector = face_detector.DetectorCVDNN() # -1 for CPU
bboxes = face_detector.detect(im, largest=True)
bbox = bboxes[0]
dim = im.shape[:2][::-1]
@@ -165,7 +166,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
# dense face 3d vertices
vertices = d3dfa_utils.predict_dense(param, roi_box)
vertices_lst.append(vertices)
-
log.info(f'generated 3d data in: {(time.time() - st):.2f}s')
# filepath helper function
@@ -183,28 +183,20 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
sio.savemat(fp_mat_3df, {'vertices': vertices, 'colors': colors, 'triangles': triangles})
# save PAF
- #fp_paf = to_fp(fpp_in, 'jpg', suffix='paf')
- #opt_paf_size = 3 # PAF feature kernel size
- #im_paf = d3dfa_paf_utils.gen_img_paf(img_crop=im_crop, param=param, kernel_size=opt_paf_size)
- #cv.imwrite(fp_paf, im_paf)
+ im_paf = d3dfa_paf_utils.gen_img_paf(img_crop=im_crop, param=param, kernel_size=3)
# save pose image
# P, pose = parse_pose(param) # Camera matrix (without scale), and pose (yaw, pitch, roll, to verify)
- img_pose = draw_utils.plot_pose_box(im, Ps, pts_res)
- fp_pose = to_fp(fpp_in, 'jpg', suffix='pose')
- cv.imwrite(fp_pose, img_pose)
+ im_pose = draw_utils.plot_pose_box(im, Ps, pts_res)
# save depth image
- fp_depth = to_fp(fpp_in, 'png', suffix='depth')
# depths_img = get_depths_image(im, vertices_lst, tri-1) # python version
im_depth = cget_depths_image(im, vertices_lst, triangles - 1) # cython version
- cv.imwrite(fp_depth, im_depth)
# save pncc image
- fp_pose = to_fp(fpp_in, 'png', suffix='pncc')
pncc_feature = cpncc(im, vertices_lst, triangles - 1) # cython version
- cv.imwrite(fp_pose, pncc_feature[:, :, ::-1]) # cv.imwrite will swap RGB -> BGR
+ im_pncc = pncc_feature[:, :, ::-1] # swap BGR
# save .ply
#fp_ply = to_fp(fpp_in, 'ply')
@@ -228,8 +220,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
# save obj
colors = d3dfa_utils.get_colors(im, vertices_orig)
- fp_obj = to_fp(fpp_in, 'obj')
- write_obj_with_colors(fp_obj, vertices_orig, triangles, colors)
#fp_landmarks = to_fp(fpp_in, 'jpg', suffix='3DDFA')
# show_flg?
@@ -276,30 +266,39 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
vertices_proj = face3d_mesh.transform.orthographic_project(vertices_cam)
# -------------------------------------------------------------------------
- # render 2D image
+ # render 2D images
w = h = max(opt_render_dim)
vertices_im = face3d_mesh.transform.to_image(vertices_proj, h, w)
- rendering = face3d_mesh.render.render_colors(vertices_im, triangles, colors_lit, h, w)
-
- cv.imshow('', rendering)
- display_utils.handle_keyboard()
+ im_render = face3d_mesh.render.render_colors(vertices_im, triangles, colors_lit, h, w)
+ im_render = (255* im_render).astype(np.uint8)
+ im_pncc = im_pncc.astype(np.uint8)
+ im_depth = im_depth.astype(np.uint8)
+ im_paf = im_paf.astype(np.uint8)
# ----------------------------------------------------------------------------
# save
- if opt_fp_out:
- # save pose only
- fpp_out = Path(opt_fp_out)
+ if opt_save:
+ fpp_out = Path(opt_dir_out) if opt_dir_out is not None else Path(opt_fp_in).parent
+ fpp_in = Path(opt_fp_in)
+ fp_out = join(fpp_out, f'{fpp_in.stem}_render.png')
+ cv.imwrite(fp_out, im_render)
+
+ fp_out = join(fpp_out, f'{fpp_in.stem}_pose.png')
+ cv.imwrite(fp_out, im_pose)
- fp_out = join(fpp_out.parent, f'{fpp_out.stem}_real{fpp_out.suffix}')
- cv.imwrite(fp_out, im_age_real)
+ fp_out = join(fpp_out, f'{fpp_in.stem}_depth.png')
+ cv.imwrite(fp_out, im_depth)
+
+ fp_out = join(fpp_out, f'{fpp_in.stem}_pncc.png')
+ cv.imwrite(fp_out, im_pncc)
- fp_out = join(fpp_out.parent, f'{fpp_out.stem}_apparent{fpp_out.suffix}')
- cv.imwrite(fp_out, im_age_apparent)
+ fp_out = join(fpp_out, f'{fpp_in.stem}_paf.png')
+ cv.imwrite(fp_out, im_paf)
- fp_out = join(fpp_out.parent, f'{fpp_out.stem}_gender{fpp_out.suffix}')
- cv.imwrite(fp_out, im_age_apparent)
+ fp_out = join(fpp_out, f'{fpp_in.stem}.obj')
+ write_obj_with_colors(fp_out, vertices_orig, triangles, colors)
# ----------------------------------------------------------------------------
@@ -307,8 +306,10 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
if opt_display:
# show all images here
- cv.imshow('real', im_age_real)
- cv.imshow('apparent', im_age_apparent)
- cv.imshow('gender', im_gender)
+ cv.imshow('3d', im_render)
+ cv.imshow('depth', im_depth)
+ cv.imshow('pncc', im_pncc)
+ cv.imshow('pose', im_pose)
+ cv.imshow('paf', im_paf)
display_utils.handle_keyboard()
diff --git a/megapixels/commands/demo/face_age_gender.py b/megapixels/commands/demo/face_age_gender.py
index c74f1e45..c4f09c13 100644
--- a/megapixels/commands/demo/face_age_gender.py
+++ b/megapixels/commands/demo/face_age_gender.py
@@ -17,7 +17,7 @@ from app.settings import app_cfg as cfg
help='GPU index')
@click.option('-f', '--force', 'opt_force', is_flag=True,
help='Force overwrite file')
-@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=True,
help='Display detections to debug')
@click.pass_context
def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
@@ -52,12 +52,12 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# ----------------------------------------------------------------------------
# detect face
- face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ face_detector = face_detector.DetectorCVDNN()
bboxes = face_detector.detect(im_resized, largest=True)
- bbox = bboxes[0]
+ bbox_norm = bboxes[0]
dim = im_resized.shape[:2][::-1]
- bbox_dim = bbox.to_dim(dim)
- if not bbox:
+ bbox_dim = bbox_norm.to_dim(dim)
+ if not bbox_norm:
log.error('no face detected')
return
else:
@@ -70,21 +70,24 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# real
age_real_predictor = face_age_gender.FaceAgeReal()
st = time.time()
- age_real = age_real_predictor.predict(im_resized, bbox_dim)
+ age_real = age_real_predictor.predict(im_resized, bbox_norm)
log.info(f'age real took: {(time.time()-st)/1000:.5f}s')
# apparent
age_apparent_predictor = face_age_gender.FaceAgeApparent()
st = time.time()
- age_apparent = age_apparent_predictor.predict(im_resized, bbox_dim)
+ age_apparent = age_apparent_predictor.predict(im_resized, bbox_norm)
log.info(f'age apparent took: {(time.time()-st)/1000:.5f}s')
# gender
gender_predictor = face_age_gender.FaceGender()
st = time.time()
- gender = gender_predictor.predict(im_resized, bbox_dim)
+ gender = gender_predictor.predict(im_resized, bbox_norm)
log.info(f'gender took: {(time.time()-st)/1000:.5f}s')
+ # ethnicity
+ # TODO
+
# ----------------------------------------------------------------------------
# output
@@ -99,21 +102,21 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# draw real age
im_age_real = im_resized.copy()
- draw_utils.draw_bbox(im_age_real, bbox_dim)
+ im_age_real = draw_utils.draw_bbox(im_age_real, bbox_norm)
txt = f'{(age_real):.2f}'
- draw_utils.draw_text(im_age_real, bbox_dim.pt_tl, txt)
+ im_age_real = draw_utils.draw_text(im_age_real, bbox_norm.pt_tl, txt)
# apparent age
im_age_apparent = im_resized.copy()
- draw_utils.draw_bbox(im_age_apparent, bbox_dim)
+ im_age_apparent = draw_utils.draw_bbox(im_age_apparent, bbox_norm)
txt = f'{(age_apparent):.2f}'
- draw_utils.draw_text(im_age_apparent, bbox_dim.pt_tl, txt)
+ im_age_apparent = draw_utils.draw_text(im_age_apparent, bbox_norm.pt_tl, txt)
# gender
im_gender = im_resized.copy()
- draw_utils.draw_bbox(im_age_apparent, bbox_dim)
+ im_gender = draw_utils.draw_bbox(im_gender, bbox_norm)
txt = f"M: {gender['m']:.2f}, F: {gender['f']:.2f}"
- draw_utils.draw_text(im_gender, (10, dim[1]-20), txt)
+ im_gender = draw_utils.draw_text(im_gender, (.1, .9), txt)
# ----------------------------------------------------------------------------
diff --git a/megapixels/commands/demo/face_beauty.py b/megapixels/commands/demo/face_beauty.py
index d31c5cee..45643c61 100644
--- a/megapixels/commands/demo/face_beauty.py
+++ b/megapixels/commands/demo/face_beauty.py
@@ -66,10 +66,10 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
bboxes = face_detector.detect(im_resized, largest=True)
- bbox = bboxes[0]
+ bbox_norm = bboxes[0]
dim = im_resized.shape[:2][::-1]
- bbox_dim = bbox.to_dim(dim)
- if not bbox:
+ bbox_dim = bbox_norm.to_dim(dim)
+ if not bbox_norm:
log.error('no face detected')
return
else:
@@ -78,7 +78,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# ----------------------------------------------------------------------------
# beauty
- beauty_score = beauty_predictor.beauty(im_resized, bbox_dim)
+ beauty_score = beauty_predictor.beauty(im_resized, bbox_norm)
# ----------------------------------------------------------------------------
@@ -93,9 +93,9 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# draw 2d landmarks
im_beauty = im_resized.copy()
- draw_utils.draw_bbox(im_beauty, bbox_dim)
+ im_beauty = draw_utils.draw_bbox(im_beauty, bbox_dim)
txt = f'Beauty score: {(100*beauty_score):.2f}'
- draw_utils.draw_text(im_beauty, bbox_dim.pt_tl, txt)
+ im_beauty = draw_utils.draw_text(im_beauty, bbox_dim.pt_tl, txt)
# ----------------------------------------------------------------------------
diff --git a/megapixels/commands/demo/face_detection.py b/megapixels/commands/demo/face_detect.py
index 488cc80d..b92db7cb 100644
--- a/megapixels/commands/demo/face_detection.py
+++ b/megapixels/commands/demo/face_detect.py
@@ -59,68 +59,27 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# ----------------------------------------------------------------------------
# detect face
- face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ face_detector = face_detector.DetectorCVDNN()
bboxes = face_detector.detect(im_resized, largest=True)
- bbox = bboxes[0]
- dim = im_resized.shape[:2][::-1]
- bbox_dim = bbox.to_dim(dim)
- if not bbox:
+ if not bboxes:
log.error('no face detected')
return
-
-
- # ----------------------------------------------------------------------------
- # generate 68 point landmarks using dlib
-
- from app.processors import face_landmarks
- landmark_detector_2d_68 = face_landmarks.Dlib2D_68()
- points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim)
-
-
- # ----------------------------------------------------------------------------
- # generate pose from 68 point 2D landmarks
-
- from app.processors import face_pose
- pose_detector = face_pose.FacePoseDLIB()
- pose_data = pose_detector.pose(points_2d_68, dim)
-
- # ----------------------------------------------------------------------------
- # output
-
- log.info(f'Face coords: {bbox_dim} face')
- log.info(f'pitch: {pose_data["pitch"]}, roll: {pose_data["roll"]}, yaw: {pose_data["yaw"]}')
+ bbox_norm = bboxes[0]
+ dim = im_resized.shape[:2][::-1]
+ bbox_dim = bbox_norm.to_dim(dim)
# ----------------------------------------------------------------------------
# draw
- # draw 2d landmarks
- im_landmarks_2d_68 = im_resized.copy()
- draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68)
- draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim)
-
- # draw pose
- im_pose = im_resized.copy()
- draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points'])
- draw_utils.draw_degrees(im_pose, pose_data)
-
-
- # ----------------------------------------------------------------------------
- # save
-
- if opt_fp_out:
- # save pose only
- cv.imwrite(opt_fp_out, im_pose)
-
+ im_face = im_resized.copy()
+ im_face = draw_utils.draw_bbox(im_face, bbox_norm)
# ----------------------------------------------------------------------------
# display
if opt_display:
-
# show all images here
- cv.imshow('Original', im_resized)
- cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68)
- cv.imshow('Pose', im_pose)
+ cv.imshow('Face', im_face)
display_utils.handle_keyboard()
\ No newline at end of file
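After this rewrite the detect demo reduces to detect, convert, draw, display. A condensed sketch of that flow using the project modules as they appear in this diff; treat it as an outline rather than the exact file contents:

# Condensed sketch of the simplified face-detect demo (project modules as used above).
import cv2 as cv
from app.processors import face_detector
from app.utils import draw_utils, display_utils, im_utils

def run_face_detect(fp_in, size=(300, 300)):
    im = cv.imread(fp_in)
    im_resized = im_utils.resize(im, width=size[0], height=size[1])
    detector = face_detector.DetectorCVDNN()
    bboxes = detector.detect(im_resized, largest=True)
    if not bboxes:
        return None                          # no face detected
    im_face = draw_utils.draw_bbox(im_resized.copy(), bboxes[0])  # normalized bbox
    cv.imshow('Face', im_face)
    display_utils.handle_keyboard()
    return bboxes[0]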
diff --git a/megapixels/commands/demo/face_landmarks_2d.py b/megapixels/commands/demo/face_landmarks_2d.py
index 22e09297..145a12a6 100644
--- a/megapixels/commands/demo/face_landmarks_2d.py
+++ b/megapixels/commands/demo/face_landmarks_2d.py
@@ -3,7 +3,6 @@ Crop images to prepare for training
"""
import click
-# from PIL import Image, ImageOps, ImageFilter, ImageDraw
from app.settings import types
from app.utils import click_utils
@@ -13,26 +12,14 @@ from app.settings import app_cfg as cfg
@click.command()
@click.option('-i', '--input', 'opt_fp_in', default=None, required=True,
help='Image filepath')
-@click.option('-o', '--output', 'opt_fp_out', default=None,
- help='GIF output path')
@click.option('--size', 'opt_size',
type=(int, int), default=(300, 300),
help='Output image size')
-@click.option('--gif-size', 'opt_gif_size',
- type=(int, int), default=(480, 480),
- help='GIF output size')
-@click.option('--gif-frames', 'opt_gif_frames', default=15,
- help='GIF frames')
-@click.option('-g', '--gpu', 'opt_gpu', default=0,
- help='GPU index')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=True,
help='Display detections to debug')
@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
- opt_size, opt_gif_size, opt_force, opt_display):
- """Generates 3D landmark animations from CSV files"""
+def cli(ctx, opt_fp_in, opt_size, opt_display):
+ """2D 68-point landmarks"""
import sys
import os
@@ -52,12 +39,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
from app.utils import plot_utils
from app.processors import face_detector, face_landmarks
from app.models.data_store import DataStore
-
- # TOOD add selective testing
- opt_run_pose = True
- opt_run_2d_68 = True
- opt_run_3d_68 = True
- opt_run_3d_68 = True
# -------------------------------------------------
@@ -66,7 +47,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
log = logger_utils.Logger.getLogger()
- # load image
im = cv.imread(opt_fp_in)
im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
@@ -74,146 +54,41 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
# ----------------------------------------------------------------------------
# detect face
- face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ face_detector = face_detector.DetectorCVDNN()
log.info('detecting face...')
st = time.time()
bboxes = face_detector.detect(im_resized, largest=True)
- bbox = bboxes[0]
+ bbox_norm = bboxes[0]
dim = im_resized.shape[:2][::-1]
- bbox_dim = bbox.to_dim(dim)
- if not bbox:
+ bbox_dim = bbox_norm.to_dim(dim)
+ if not bbox_norm:
log.error('no face detected')
return
else:
log.info(f'Detected face in {(time.time() - st):.2f}s')
- log.info('')
-
-
- # ----------------------------------------------------------------------------
- # detect 3D landmarks
-
- log.info('loading 3D landmark generator files...')
- landmark_detector_3d_68 = face_landmarks.FaceAlignment3D_68(gpu=opt_gpu) # -1 for CPU
- log.info('generating 3D landmarks...')
- st = time.time()
- points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_dim.to_xyxy())
- log.info(f'generated 3D landmarks in {(time.time() - st):.2f}s')
- log.info('')
-
-
- # ----------------------------------------------------------------------------
- # generate 3D GIF animation
- log.info('generating 3D animation...')
- if not opt_fp_out:
- fpp_im = Path(opt_fp_in)
- fp_out = join(fpp_im.parent, f'{fpp_im.stem}_anim.gif')
- else:
- fp_out = opt_fp_out
- st = time.time()
- plot_utils.generate_3d_landmark_anim(np.array(points_3d_68), fp_out,
- size=opt_gif_size, num_frames=opt_gif_frames)
- log.info(f'Generated animation in {(time.time() - st):.2f}s')
- log.info(f'Saved to: {fp_out}')
- log.info('')
-
-
- # ----------------------------------------------------------------------------
- # generate face vectors, only to test if feature extraction works
-
- log.info('initialize face recognition model...')
- from app.processors import face_recognition
- face_rec = face_recognition.RecognitionDLIB()
- st = time.time()
- log.info('generating face vector...')
- vec = face_rec.vec(im_resized, bbox_dim)
- log.info(f'generated face vector in {(time.time() - st):.2f}s')
- log.info('')
-
# ----------------------------------------------------------------------------
# generate 68 point landmarks using dlib
log.info('initializing face landmarks 68 dlib...')
- from app.processors import face_landmarks
landmark_detector_2d_68 = face_landmarks.Dlib2D_68()
log.info('generating 2D 68PT landmarks...')
st = time.time()
- points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim)
+ points_norm = landmark_detector_2d_68.landmarks(im_resized, bbox_norm)
log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s')
- log.info('')
# ----------------------------------------------------------------------------
- # generate pose from 68 point 2D landmarks
-
- if opt_run_pose:
- log.info('initialize pose...')
- from app.processors import face_pose
- pose_detector = face_pose.FacePoseDLIB()
- log.info('generating pose...')
- st = time.time()
- pose_data = pose_detector.pose(points_2d_68, dim)
- log.info(f'generated pose {(time.time() - st):.2f}s')
- log.info('')
-
-
- # x
-
-
-
# display
+
if opt_display:
-
- # draw bbox
-
- # draw 3d landmarks
- im_landmarks_3d_68 = im_resized.copy()
- draw_utils.draw_landmarks3D(im_landmarks_3d_68, points_3d_68)
- draw_utils.draw_bbox(im_landmarks_3d_68, bbox_dim)
# draw 2d landmarks
- im_landmarks_2d_68 = im_resized.copy()
- draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68)
- draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim)
-
- # draw pose
- if opt_run_pose:
- im_pose = im_resized.copy()
- draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points'])
- draw_utils.draw_degrees(im_pose, pose_data)
-
- # draw animated GIF
- im = Image.open(opt_fp_out)
- im_frames = []
- duration = im.info['duration']
- try:
- while True:
- im.seek(len(im_frames))
- mypalette = im.getpalette()
- im.putpalette(mypalette)
- im_jpg = Image.new("RGB", im.size)
- im_jpg.paste(im)
- im_np = im_utils.pil2np(im_jpg.copy())
- im_frames.append(im_np)
- except EOFError:
- pass # end of GIF sequence
-
- n_frames = len(im_frames)
- frame_number = 0
+ im_lmarks = im_resized.copy()
+ im_lmarks = draw_utils.draw_bbox(im_lmarks, bbox_norm)
+ im_lmarks = draw_utils.draw_landmarks2D(im_lmarks, points_norm)
- while True:
- # show all images here
- cv.imshow('Original', im_resized)
- cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68)
- cv.imshow('3D 68PT Landmarks', im_landmarks_3d_68)
- cv.imshow('Pose', im_pose)
- cv.imshow('3D 68pt GIF', im_frames[frame_number])
- frame_number = (frame_number + 1) % n_frames
- k = cv.waitKey(duration) & 0xFF
- if k == 27 or k == ord('q'): # ESC
- cv.destroyAllWindows()
- sys.exit()
- elif k != 255:
- # any key to continue
- break
\ No newline at end of file
+ # show all images here
+ cv.imshow('2D 68PT Landmarks', im_lmarks)
+ display_utils.handle_keyboard()
\ No newline at end of file
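The 2D demo now passes the normalized bbox straight to the dlib 68-point predictor and draws normalized points. For reference, a hedged sketch of what a normalized landmark drawing helper might look like; the name and point format are assumptions, not the draw_utils implementation:

# Sketch of a normalized 2D-landmark drawing helper (name and point format assumed).
import cv2 as cv

def draw_landmarks_norm(im, points_norm, radius=2, color=(0, 255, 0)):
    '''Draw normalized (x, y) landmark points on a copy of the image.'''
    im_out = im.copy()
    h, w = im_out.shape[:2]
    for (x, y) in points_norm:
        cv.circle(im_out, (int(x * w), int(y * h)), radius, color, -1, cv.LINE_AA)
    return im_out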
diff --git a/megapixels/commands/demo/face_landmarks_3d.py b/megapixels/commands/demo/face_landmarks_3d.py
index 22e09297..ed5a00d5 100644
--- a/megapixels/commands/demo/face_landmarks_3d.py
+++ b/megapixels/commands/demo/face_landmarks_3d.py
@@ -3,7 +3,6 @@ Crop images to prepare for training
"""
import click
-# from PIL import Image, ImageOps, ImageFilter, ImageDraw
from app.settings import types
from app.utils import click_utils
@@ -27,7 +26,7 @@ from app.settings import app_cfg as cfg
help='GPU index')
@click.option('-f', '--force', 'opt_force', is_flag=True,
help='Force overwrite file')
-@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=True,
help='Display detections to debug')
@click.pass_context
def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
@@ -52,12 +51,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
from app.utils import plot_utils
from app.processors import face_detector, face_landmarks
from app.models.data_store import DataStore
-
- # TOOD add selective testing
- opt_run_pose = True
- opt_run_2d_68 = True
- opt_run_3d_68 = True
- opt_run_3d_68 = True
# -------------------------------------------------
@@ -74,14 +67,14 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
# ----------------------------------------------------------------------------
# detect face
- face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ face_detector = face_detector.DetectorCVDNN()
log.info('detecting face...')
st = time.time()
bboxes = face_detector.detect(im_resized, largest=True)
- bbox = bboxes[0]
+ bbox_norm = bboxes[0]
dim = im_resized.shape[:2][::-1]
- bbox_dim = bbox.to_dim(dim)
- if not bbox:
+ bbox_dim = bbox_norm.to_dim(dim)
+ if not bbox_norm:
log.error('no face detected')
return
else:
@@ -96,7 +89,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
landmark_detector_3d_68 = face_landmarks.FaceAlignment3D_68(gpu=opt_gpu) # -1 for CPU
log.info('generating 3D landmarks...')
st = time.time()
- points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_dim.to_xyxy())
+ points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_norm)
log.info(f'generated 3D landmarks in {(time.time() - st):.2f}s')
log.info('')
@@ -119,19 +112,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
# ----------------------------------------------------------------------------
- # generate face vectors, only to test if feature extraction works
-
- log.info('initialize face recognition model...')
- from app.processors import face_recognition
- face_rec = face_recognition.RecognitionDLIB()
- st = time.time()
- log.info('generating face vector...')
- vec = face_rec.vec(im_resized, bbox_dim)
- log.info(f'generated face vector in {(time.time() - st):.2f}s')
- log.info('')
-
-
- # ----------------------------------------------------------------------------
# generate 68 point landmarks using dlib
log.info('initializing face landmarks 68 dlib...')
@@ -139,54 +119,25 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
landmark_detector_2d_68 = face_landmarks.Dlib2D_68()
log.info('generating 2D 68PT landmarks...')
st = time.time()
- points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim)
+ points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_norm)
log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s')
log.info('')
- # ----------------------------------------------------------------------------
- # generate pose from 68 point 2D landmarks
-
- if opt_run_pose:
- log.info('initialize pose...')
- from app.processors import face_pose
- pose_detector = face_pose.FacePoseDLIB()
- log.info('generating pose...')
- st = time.time()
- pose_data = pose_detector.pose(points_2d_68, dim)
- log.info(f'generated pose {(time.time() - st):.2f}s')
- log.info('')
-
-
- # x
-
-
-
# display
if opt_display:
- # draw bbox
-
- # draw 3d landmarks
- im_landmarks_3d_68 = im_resized.copy()
- draw_utils.draw_landmarks3D(im_landmarks_3d_68, points_3d_68)
- draw_utils.draw_bbox(im_landmarks_3d_68, bbox_dim)
-
- # draw 2d landmarks
- im_landmarks_2d_68 = im_resized.copy()
- draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68)
- draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim)
-
- # draw pose
- if opt_run_pose:
- im_pose = im_resized.copy()
- draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points'])
- draw_utils.draw_degrees(im_pose, pose_data)
+ # draw landmarks
+ im_lmarks = im_resized.copy()
+ im_lmarks = draw_utils.draw_bbox(im_lmarks, bbox_norm)
+ im_lmarks = draw_utils.draw_landmarks2D(im_lmarks, points_2d_68, radius=1, color=(0,0,255))
+ im_lmarks = draw_utils.draw_landmarks3D(im_lmarks, points_3d_68, radius=3, color=(0,255,0))
# draw animated GIF
- im = Image.open(opt_fp_out)
+ im = Image.open(fp_out)
im_frames = []
duration = im.info['duration']
+
try:
while True:
im.seek(len(im_frames))
@@ -204,10 +155,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
while True:
# show all images here
- cv.imshow('Original', im_resized)
- cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68)
- cv.imshow('3D 68PT Landmarks', im_landmarks_3d_68)
- cv.imshow('Pose', im_pose)
+ cv.imshow('2D/3D 68PT Landmarks', im_lmarks)
cv.imshow('3D 68pt GIF', im_frames[frame_number])
frame_number = (frame_number + 1) % n_frames
k = cv.waitKey(duration) & 0xFF
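The 3D demo still round-trips the generated GIF through PIL to obtain per-frame arrays for cv.imshow. A self-contained sketch of that frame-extraction step; note that frames come back as RGB and need a BGR flip before display:

# Sketch: extract GIF frames into numpy arrays for OpenCV display.
import numpy as np
from PIL import Image

def gif_to_frames(fp_gif):
    '''Return (frames, duration_ms); frames are RGB numpy arrays.'''
    im = Image.open(fp_gif)
    duration = im.info.get('duration', 100)   # per-frame delay in milliseconds
    frames = []
    try:
        while True:
            im.seek(len(frames))              # advance to the next frame
            frames.append(np.array(im.convert('RGB')))
    except EOFError:
        pass                                  # end of GIF sequence
    return frames, duration

# display: cv.imshow('3D 68pt GIF', frames[i][:, :, ::-1])  # RGB -> BGR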
diff --git a/megapixels/commands/demo/face_pose.py b/megapixels/commands/demo/face_pose.py
index 3918adac..48214e0d 100644
--- a/megapixels/commands/demo/face_pose.py
+++ b/megapixels/commands/demo/face_pose.py
@@ -22,7 +22,7 @@ from app.settings import app_cfg as cfg
help='GPU index')
@click.option('-f', '--force', 'opt_force', is_flag=True,
help='Force overwrite file')
-@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=True,
help='Display detections to debug')
@click.pass_context
def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
@@ -61,12 +61,12 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# ----------------------------------------------------------------------------
# detect face
- face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ face_detector = face_detector.DetectorCVDNN()
bboxes = face_detector.detect(im_resized, largest=True)
- bbox = bboxes[0]
+ bbox_norm = bboxes[0]
dim = im_resized.shape[:2][::-1]
- bbox_dim = bbox.to_dim(dim)
- if not bbox:
+ bbox_dim = bbox_norm.to_dim(dim)
+ if not bbox_norm:
log.error('no face detected')
return
@@ -76,7 +76,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
from app.processors import face_landmarks
landmark_detector_2d_68 = face_landmarks.Dlib2D_68()
- points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim)
+ points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_norm)
# ----------------------------------------------------------------------------
@@ -97,14 +97,14 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# draw
# draw 2d landmarks
- im_landmarks_2d_68 = im_resized.copy()
- draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68)
- draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim)
+ im_landmarks = im_resized.copy()
+ im_landmarks = draw_utils.draw_landmarks2D(im_landmarks, points_2d_68)
+ im_landmarks = draw_utils.draw_bbox(im_landmarks, bbox_norm)
# draw pose
im_pose = im_resized.copy()
- draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points'])
- draw_utils.draw_degrees(im_pose, pose_data)
+ im_pose = draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points'])
+ im_pose = draw_utils.draw_degrees(im_pose, pose_data)
# ----------------------------------------------------------------------------
@@ -120,9 +120,8 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
if opt_display:
-
# show all images here
cv.imshow('Original', im_resized)
- cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68)
+ cv.imshow('2D 68PT Landmarks', im_landmarks)
cv.imshow('Pose', im_pose)
display_utils.handle_keyboard()
\ No newline at end of file
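FacePoseDLIB turns the 68 2D landmarks and the image size into pitch, roll, and yaw. A common way to do this is solvePnP against a generic 3D face model; the sketch below is that standard recipe, with the model points, landmark subset, and camera approximation all assumed rather than taken from the project:

# Sketch: head pose from six 2D landmark points via cv2.solvePnP (generic model assumed).
import cv2 as cv
import numpy as np

# generic 3D reference points: nose tip, chin, eye corners, mouth corners
MODEL_3D = np.array([
    (0.0, 0.0, 0.0),          # nose tip
    (0.0, -330.0, -65.0),     # chin
    (-225.0, 170.0, -135.0),  # left eye outer corner
    (225.0, 170.0, -135.0),   # right eye outer corner
    (-150.0, -150.0, -125.0), # left mouth corner
    (150.0, -150.0, -125.0),  # right mouth corner
], dtype=np.float64)

def estimate_pose(points_2d, dim):
    '''points_2d: six pixel-space (x, y) points in the order above; dim: (width, height).'''
    w, h = dim
    cam = np.array([[w, 0, w / 2], [0, w, h / 2], [0, 0, 1]], dtype=np.float64)
    dist = np.zeros((4, 1))   # assume no lens distortion
    ok, rvec, tvec = cv.solvePnP(MODEL_3D, np.array(points_2d, dtype=np.float64), cam, dist)
    rmat, _ = cv.Rodrigues(rvec)
    angles, *_ = cv.RQDecomp3x3(rmat)   # Euler angles in degrees
    pitch, yaw, roll = angles
    return {'pitch': pitch, 'yaw': yaw, 'roll': roll}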
diff --git a/megapixels/commands/demo/face_search.py b/megapixels/commands/demo/face_search.py
index ca0b8016..d50f5c73 100644
--- a/megapixels/commands/demo/face_search.py
+++ b/megapixels/commands/demo/face_search.py
@@ -13,7 +13,7 @@ log = Logger.getLogger()
help='File to lookup')
@click.option('--data_store', 'opt_data_store',
type=cfg.DataStoreVar,
- default=click_utils.get_default(types.DataStore.SSD),
+ default=click_utils.get_default(types.DataStore.HDD),
show_default=True,
help=click_utils.show_help(types.DataStore))
@click.option('--dataset', 'opt_dataset',
@@ -40,44 +40,47 @@ def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_results, opt_gpu):
from tqdm import tqdm
import imutils
- from app.utils import file_utils, im_utils
+ from app.utils import file_utils, im_utils, display_utils, draw_utils
from app.models.data_store import DataStore
from app.processors import face_detector
- from app.processors import face_recognition
+ from app.processors import face_extractor
log = Logger.getLogger()
+
# init dataset
dataset = Dataset(opt_data_store, opt_dataset)
dataset.load_face_vectors()
dataset.load_records()
- dataset.load_identities()
+ # dataset.load_identities()
# init face detection
- detector = face_detector.DetectorDLIBHOG()
+ detector = face_detector.DetectorCVDNN()
- # init face recognition
- recognition = face_recognition.RecognitionDLIB(gpu=opt_gpu)
+ # init face extractor
+ extractor = face_extractor.ExtractorVGG()
# load query image
im_query = cv.imread(opt_fp_in)
# get detection as BBox object
bboxes = detector.detect(im_query, largest=True)
- bbox = bboxes[0]
+ bbox_norm = bboxes[0]
dim = im_query.shape[:2][::-1]
- bbox = bbox.to_dim(dim) # convert back to real dimensions
+ bbox_dim = bbox_norm.to_dim(dim) # convert back to real dimensions
- if not bbox:
+ if not bbox_norm:
log.error('No face detected. Exiting')
return
# extract the face vectors
- vec_query = recognition.vec(im_query, bbox)
+ vec_query = extractor.extract(im_query, bbox_norm)
+ log.debug(f'len query: {len(vec_query)}')
# find matches
image_records = dataset.find_matches(vec_query, n_results=opt_results)
# summary
+ im_query = draw_utils.draw_bbox(im_query, bbox_norm, stroke_weight=8)
ims_match = [im_query]
for image_record in image_records:
image_record.summarize()
@@ -85,16 +88,11 @@ def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_results, opt_gpu):
im_match = cv.imread(image_record.filepath)
ims_match.append(im_match)
+ # make montages of most similar faces
montages = imutils.build_montages(ims_match, (256, 256), (3,2))
+ # display
for i, montage in enumerate(montages):
- cv.imshow(f'{i}', montage)
- # cv gui
- while True:
- k = cv.waitKey(1) & 0xFF
- if k == 27 or k == ord('q'): # ESC
- cv.destroyAllWindows()
- sys.exit()
- elif k != 255:
- # any key to continue
- break
+ cv.imshow(f'{opt_dataset.name.upper()}: page {i}', montage)
+
+ display_utils.handle_keyboard()
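dataset.find_matches compares the query embedding against the face vectors loaded by load_face_vectors; its internals are not shown in this diff, but a typical implementation ranks stored vectors by cosine similarity. A hedged sketch of that idea, not the repository's actual code:

# Sketch: rank stored face vectors by cosine similarity to a query vector (assumed approach).
import numpy as np

def rank_by_cosine(vec_query, vecs, n_results=5):
    '''vecs: (N, D) array of stored vectors; returns indices of the best n_results matches.'''
    q = np.asarray(vec_query, dtype=np.float64)
    m = np.asarray(vecs, dtype=np.float64)
    sims = (m @ q) / (np.linalg.norm(m, axis=1) * np.linalg.norm(q) + 1e-9)
    return np.argsort(-sims)[:n_results]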
diff --git a/megapixels/commands/demo/face_vector.py b/megapixels/commands/demo/face_vector.py
index 3ff68001..c7b5ef2e 100644
--- a/megapixels/commands/demo/face_vector.py
+++ b/megapixels/commands/demo/face_vector.py
@@ -1,9 +1,8 @@
"""
-Crop images to prepare for training
+Tests if the feature vector generator works
"""
import click
-# from PIL import Image, ImageOps, ImageFilter, ImageDraw
from app.settings import types
from app.utils import click_utils
@@ -14,11 +13,11 @@ from app.settings import app_cfg as cfg
@click.option('-i', '--input', 'opt_fp_in', default=None, required=True,
help='Image filepath')
@click.option('--size', 'opt_size',
- type=(int, int), default=(300, 300),
+ type=(int, int), default=cfg.DEFAULT_SIZE_FACE_DETECT,
help='Output image size')
@click.option('-g', '--gpu', 'opt_gpu', default=0,
help='GPU index')
-@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=True,
help='Display detections to debug')
@click.pass_context
def cli(ctx, opt_fp_in, opt_gpu, opt_size, opt_display):
@@ -54,12 +53,12 @@ def cli(ctx, opt_fp_in, opt_gpu, opt_size, opt_display):
# ----------------------------------------------------------------------------
# detect face
- face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ face_detector = face_detector.DetectorCVDNN()
bboxes = face_detector.detect(im_resized, largest=True)
- bbox = bboxes[0]
+ bbox_norm = bboxes[0]
dim = im_resized.shape[:2][::-1]
- bbox_dim = bbox.to_dim(dim)
- if not bbox:
+ bbox_dim = bbox_norm.to_dim(dim)
+ if not bbox_norm:
log.error('no face detected')
return
@@ -67,14 +66,13 @@ def cli(ctx, opt_fp_in, opt_gpu, opt_size, opt_display):
# ----------------------------------------------------------------------------
# generate face vectors, only to test if feature extraction works
- from app.processors import face_recognition
- facerec = face_recognition.RecognitionDLIB()
- vec = facerec.vec(im_resized, bbox_dim)
- vec_flat = facerec.flatten(vec)
- log.info(f'generated vector. showing vec[0:10]:')
- log.info(f'\n{vec_flat}')
+ from app.processors import face_extractor
+ extractor = face_extractor.ExtractorVGG()
+ vec = extractor.extract(im_resized, bbox_norm)
+ vec_str = extractor.to_str(vec)
+ log.info(f'\n{vec_str}')
if opt_display:
- draw_utils.draw_bbox(im_resized, bbox_dim)
+ im_resized = draw_utils.draw_bbox(im_resized, bbox_dim)
cv.imshow('Original', im_resized)
display_utils.handle_keyboard()
\ No newline at end of file
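extractor.to_str serializes the embedding for logging; the exact format is not shown here, but a plausible round trip is comma-separated floats. A sketch under that assumption:

# Sketch: serialize/deserialize a face vector as comma-separated floats (format assumed).
import numpy as np

def vec_to_str(vec, precision=6):
    return ','.join(f'{v:.{precision}f}' for v in np.asarray(vec).ravel())

def str_to_vec(s):
    return np.array([float(v) for v in s.split(',')], dtype=np.float32)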