summaryrefslogtreecommitdiff
path: root/megapixels/commands
diff options
context:
space:
mode:
Diffstat (limited to 'megapixels/commands')
-rw-r--r--megapixels/commands/cv/face_attributes.py136
-rw-r--r--megapixels/commands/cv/face_landmark_2d_68.py4
-rw-r--r--megapixels/commands/cv/face_pose.py11
-rw-r--r--megapixels/commands/cv/face_roi.py67
-rw-r--r--megapixels/commands/cv/face_vector.py46
-rw-r--r--megapixels/commands/cv/resize.py13
-rw-r--r--megapixels/commands/cv/resize_dataset.py149
-rw-r--r--megapixels/commands/datasets/file_record.py40
-rw-r--r--megapixels/commands/demo/face_3ddfa.py85
-rw-r--r--megapixels/commands/demo/face_age_gender.py31
-rw-r--r--megapixels/commands/demo/face_beauty.py12
-rw-r--r--megapixels/commands/demo/face_detect.py (renamed from megapixels/commands/demo/face_detection.py)57
-rw-r--r--megapixels/commands/demo/face_landmarks_2d.py155
-rw-r--r--megapixels/commands/demo/face_landmarks_3d.py82
-rw-r--r--megapixels/commands/demo/face_pose.py25
-rw-r--r--megapixels/commands/demo/face_search.py49
-rw-r--r--megapixels/commands/demo/face_vector.py28
17 files changed, 559 insertions, 431 deletions
diff --git a/megapixels/commands/cv/face_attributes.py b/megapixels/commands/cv/face_attributes.py
new file mode 100644
index 00000000..01fe3bd1
--- /dev/null
+++ b/megapixels/commands/cv/face_attributes.py
@@ -0,0 +1,136 @@
+"""
+
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=cfg.DEFAULT_SIZE_FACE_DETECT,
+ help='Processing size for detection')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('-d', '--display', 'opt_display', is_flag=True,
+ help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
+ opt_size, opt_slice, opt_force, opt_display):
+  """Estimates face attributes (apparent/real age and gender) for each ROI"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors import face_age_gender
+ from app.models.data_store import DataStore
+ from app.models.bbox import BBox
+
+ # -------------------------------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+ # init face processors
+ age_estimator_apnt = face_age_gender.FaceAgeApparent()
+ age_estimator_real = face_age_gender.FaceAgeReal()
+ gender_estimator = face_age_gender.FaceGender()
+
+ # init filepaths
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # set file output path
+ metadata_type = types.Metadata.FACE_ATTRIBUTES
+ fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwite')
+ return
+
+ # -------------------------------------------------------------------------
+ # load filepath data
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
+ # load ROI data
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # slice if you want
+ if opt_slice:
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+ # group by image index (speedup if multiple faces per image)
+ df_img_groups = df_roi.groupby('record_index')
+ log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+  # store attribute results in list
+ results = []
+
+ # -------------------------------------------------------------------------
+ # iterate groups with file/record index as key
+
+ for record_index, df_img_group in tqdm(df_img_groups):
+
+ # access file_record DataSeries
+ file_record = df_record.iloc[record_index]
+
+ # load image
+ fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext)
+ im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ dim = im_resized.shape[:2][::-1]
+
+ # iterate ROIs in this image
+ for roi_index, df_img in df_img_group.iterrows():
+
+      # build bbox for this ROI and run attribute estimators
+ bbox_norm = BBox.from_xywh(df_img.x, df_img.y, df_img.w, df_img.h)
+ bbox_dim = bbox_norm.to_dim(dim)
+
+ age_apnt = age_estimator_apnt.predict(im_resized, bbox_norm)
+ age_real = age_estimator_real.predict(im_resized, bbox_norm)
+ gender = gender_estimator.predict(im_resized, bbox_norm)
+
+ attr_obj = {
+ 'age_real':float(f'{age_real:.2f}'),
+ 'age_apparent': float(f'{age_apnt:.2f}'),
+ 'm': float(f'{gender["m"]:.4f}'),
+ 'f': float(f'{gender["f"]:.4f}'),
+ 'roi_index': roi_index
+ }
+ results.append(attr_obj)
+
+
+ # create DataFrame and save to CSV
+ file_utils.mkdirs(fp_out)
+ df = pd.DataFrame.from_dict(results)
+ df.index.name = 'index'
+ df.to_csv(fp_out)
+
+ # save script
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_landmark_2d_68.py b/megapixels/commands/cv/face_landmark_2d_68.py
index e24d4b60..c6978a40 100644
--- a/megapixels/commands/cv/face_landmark_2d_68.py
+++ b/megapixels/commands/cv/face_landmark_2d_68.py
@@ -126,7 +126,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
points = landmark_detector.landmarks(im_resized, bbox)
points_norm = landmark_detector.normalize(points, dim)
- points_flat = landmark_detector.flatten(points_norm)
+ points_str = landmark_detector.to_str(points_norm)
# display if optioned
if opt_display:
@@ -137,7 +137,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
display_utils.handle_keyboard()
# add to results for CSV
- results.append(points_flat)
+ results.append({'vec': points_str, 'roi_index':roi_index})
# create DataFrame and save to CSV
diff --git a/megapixels/commands/cv/face_pose.py b/megapixels/commands/cv/face_pose.py
index 70ea1f30..cb7ec56c 100644
--- a/megapixels/commands/cv/face_pose.py
+++ b/megapixels/commands/cv/face_pose.py
@@ -92,7 +92,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
# load data
fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
- df_record = pd.read_csv(fp_record).set_index('index')
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
# load ROI data
fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
df_roi = pd.read_csv(fp_roi).set_index('index')
@@ -125,10 +125,11 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
#dim = (file_record.width, file_record.height)
dim = im_resized.shape[:2][::-1]
- bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+ bbox_norm = BBox.from_xywh(x, y, w, h)
+ bbox_dim = bbox_norm.to_dim(dim)
# get pose
- landmarks = face_landmarks.landmarks(im_resized, bbox)
+ landmarks = face_landmarks.landmarks(im_resized, bbox_norm)
pose_data = face_pose.pose(landmarks, dim)
#pose_degrees = pose_data['degrees'] # only keep the degrees data
#pose_degrees['points_nose'] = pose_data
@@ -143,8 +144,8 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
# add image index and append to result CSV data
pose_data['roi_index'] = roi_index
for k, v in pose_data['points'].items():
- pose_data[f'point_{k}_x'] = v[0][0] / dim[0]
- pose_data[f'point_{k}_y'] = v[0][1] / dim[1]
+ pose_data[f'point_{k}_x'] = v[0] / dim[0]
+ pose_data[f'point_{k}_y'] = v[1] / dim[1]
# rearrange data structure for DataFrame
pose_data.pop('points')
diff --git a/megapixels/commands/cv/face_roi.py b/megapixels/commands/cv/face_roi.py
index 70fff401..e83b0f61 100644
--- a/megapixels/commands/cv/face_roi.py
+++ b/megapixels/commands/cv/face_roi.py
@@ -33,7 +33,7 @@ color_filters = {'color': 1, 'gray': 2, 'all': 3}
help='Output image size')
@click.option('-d', '--detector', 'opt_detector_type',
type=cfg.FaceDetectNetVar,
- default=click_utils.get_default(types.FaceDetectNet.DLIB_CNN),
+ default=click_utils.get_default(types.FaceDetectNet.CVDNN),
help=click_utils.show_help(types.FaceDetectNet))
@click.option('-g', '--gpu', 'opt_gpu', default=0,
help='GPU index')
@@ -97,31 +97,37 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu)
elif opt_detector_type == types.FaceDetectNet.DLIB_HOG:
detector = face_detector.DetectorDLIBHOG()
- elif opt_detector_type == types.FaceDetectNet.MTCNN:
- detector = face_detector.DetectorMTCNN(gpu=opt_gpu)
+ elif opt_detector_type == types.FaceDetectNet.MTCNN_TF:
+ detector = face_detector.DetectorMTCNN_TF(gpu=opt_gpu)
elif opt_detector_type == types.FaceDetectNet.HAAR:
log.error('{} not yet implemented'.format(opt_detector_type.name))
return
# get list of files to process
- fp_in = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
- df_records = pd.read_csv(fp_in).set_index('index')
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
if opt_slice:
- df_records = df_records[opt_slice[0]:opt_slice[1]]
- log.debug('processing {:,} files'.format(len(df_records)))
+ df_record = df_record[opt_slice[0]:opt_slice[1]]
+ log.debug('processing {:,} files'.format(len(df_record)))
# filter out grayscale
color_filter = color_filters[opt_color_filter]
# set largest flag, to keep all or only largest
- opt_largest = opt_largest == 'largest'
+ opt_largest = (opt_largest == 'largest')
data = []
+ skipped_files = []
+ processed_files = []
- for df_record in tqdm(df_records.itertuples(), total=len(df_records)):
+ for df_record in tqdm(df_record.itertuples(), total=len(df_record)):
fp_im = data_store.face(str(df_record.subdir), str(df_record.fn), str(df_record.ext))
- im = cv.imread(fp_im)
- im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ try:
+ im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ except Exception as e:
+ log.debug(f'could not read: {fp_im}')
+ return
# filter out color or grayscale iamges
if color_filter != color_filters['all']:
try:
@@ -134,31 +140,38 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
continue
try:
- bboxes = detector.detect(im_resized, pyramids=opt_pyramids, largest=opt_largest,
+ bboxes_norm = detector.detect(im_resized, pyramids=opt_pyramids, largest=opt_largest,
zone=opt_zone, conf_thresh=opt_conf_thresh)
except Exception as e:
log.error('could not detect: {}'.format(fp_im))
log.error('{}'.format(e))
continue
- for bbox in bboxes:
- roi = {
- 'record_index': int(df_record.Index),
- 'x': bbox.x,
- 'y': bbox.y,
- 'w': bbox.w,
- 'h': bbox.h
- }
- data.append(roi)
- if len(bboxes) == 0:
+ if len(bboxes_norm) == 0:
+ skipped_files.append(fp_im)
log.warn(f'no faces in: {fp_im}')
-
+ log.warn(f'skipped: {len(skipped_files)}. found:{len(processed_files)} files')
+ else:
+ processed_files.append(fp_im)
+ for bbox in bboxes_norm:
+ roi = {
+ 'record_index': int(df_record.Index),
+ 'x': bbox.x,
+ 'y': bbox.y,
+ 'w': bbox.w,
+ 'h': bbox.h
+ }
+ data.append(roi)
+
+    # if display optioned
- if opt_display and len(bboxes):
+ if opt_display and len(bboxes_norm):
# draw each box
- for bbox in bboxes:
- bbox_dim = bbox.to_dim(im_resized.shape[:2][::-1])
- draw_utils.draw_bbox(im_resized, bbox_dim)
+ for bbox_norm in bboxes_norm:
+ dim = im_resized.shape[:2][::-1]
+ bbox_dim = bbox.to_dim(dim)
+ if dim[0] > 1000:
+ im_resized = im_utils.resize(im_resized, width=1000)
+ im_resized = draw_utils.draw_bbox(im_resized, bbox_norm)
# display and wait
cv.imshow('', im_resized)
diff --git a/megapixels/commands/cv/face_vector.py b/megapixels/commands/cv/face_vector.py
index 4df647f5..cb155d08 100644
--- a/megapixels/commands/cv/face_vector.py
+++ b/megapixels/commands/cv/face_vector.py
@@ -1,5 +1,8 @@
"""
Converts ROIs to face vector
+NB: the VGG Face2 extractor should be used with MTCNN ROIs (not square)
+ the DLIB face extractor should be used with DLIB ROIs (square)
+see https://github.com/ox-vgg/vgg_face2 for TAR@FAR
"""
import click
@@ -24,12 +27,16 @@ from app.settings import app_cfg as cfg
show_default=True,
help=click_utils.show_help(types.Dataset))
@click.option('--size', 'opt_size',
- type=(int, int), default=(300, 300),
+ type=(int, int), default=cfg.DEFAULT_SIZE_FACE_DETECT,
help='Output image size')
+@click.option('-e', '--extractor', 'opt_extractor',
+ default=click_utils.get_default(types.FaceExtractor.VGG),
+ type=cfg.FaceExtractorVar,
+ help='Type of extractor framework/network to use')
@click.option('-j', '--jitters', 'opt_jitters', default=cfg.DLIB_FACEREC_JITTERS,
- help='Number of jitters')
-@click.option('-p', '--padding', 'opt_padding', default=cfg.DLIB_FACEREC_PADDING,
- help='Percentage padding')
+ help='Number of jitters (only for dlib')
+@click.option('-p', '--padding', 'opt_padding', default=cfg.FACEREC_PADDING,
+ help='Percentage ROI padding')
@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
help='Slice list of files')
@click.option('-f', '--force', 'opt_force', is_flag=True,
@@ -38,7 +45,7 @@ from app.settings import app_cfg as cfg
help='GPU index')
@click.pass_context
def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
- opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding):
+ opt_extractor, opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding):
"""Converts face ROIs to vectors"""
import sys
@@ -56,7 +63,7 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
from app.models.bbox import BBox
from app.models.data_store import DataStore
from app.utils import logger_utils, file_utils, im_utils
- from app.processors import face_recognition
+ from app.processors import face_extractor
# -------------------------------------------------
@@ -73,11 +80,15 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
return
# init face processors
- facerec = face_recognition.RecognitionDLIB()
+ if opt_extractor == types.FaceExtractor.DLIB:
+ log.debug('set dlib')
+ extractor = face_extractor.ExtractorDLIB(gpu=opt_gpu, jitters=opt_jitters)
+ elif opt_extractor == types.FaceExtractor.VGG:
+ extractor = face_extractor.ExtractorVGG()
# load data
fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
- df_record = pd.read_csv(fp_record).set_index('index')
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
df_roi = pd.read_csv(fp_roi).set_index('index')
@@ -85,7 +96,8 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
df_roi = df_roi[opt_slice[0]:opt_slice[1]]
# -------------------------------------------------
- # process here
+ # process images
+
df_img_groups = df_roi.groupby('record_index')
log.debug('processing {:,} groups'.format(len(df_img_groups)))
@@ -95,21 +107,21 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
ds_record = df_record.iloc[record_index]
fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
im = cv.imread(fp_im)
+ im = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
for roi_index, df_img in df_img_group.iterrows():
# get bbox
x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
dim = (ds_record.width, ds_record.height)
- #dim = im.shape[:2][::-1]
# get face vector
- bbox_dim = BBox.from_xywh(x, y, w, h).to_dim(dim) # convert to int real dimensions
+ bbox = BBox.from_xywh(x, y, w, h) # norm
# compute vec
- # padding=opt_padding not yet implemented in dlib===19.16 but merged in master
- vec = facerec.vec(im, bbox_dim, jitters=opt_jitters)
- vec_flat = facerec.flatten(vec)
- vec_flat['roi_index'] = roi_index
- vec_flat['record_index'] = record_index
- vecs.append(vec_flat)
+ vec = extractor.extract(im, bbox) # use normalized BBox
+ vec_str = extractor.to_str(vec)
+ vec_obj = {'vec':vec_str, 'roi_index': roi_index, 'record_index':record_index}
+ vecs.append(vec_obj)
+ # -------------------------------------------------
+ # save data
# create DataFrame and save to CSV
df = pd.DataFrame.from_dict(vecs)
diff --git a/megapixels/commands/cv/resize.py b/megapixels/commands/cv/resize.py
index dcd621b3..7409ee6f 100644
--- a/megapixels/commands/cv/resize.py
+++ b/megapixels/commands/cv/resize.py
@@ -49,7 +49,7 @@ centerings = {
help='File glob ext')
@click.option('--size', 'opt_size',
type=(int, int), default=(256, 256),
- help='Output image size (square)')
+ help='Max output size')
@click.option('--method', 'opt_scale_method',
type=click.Choice(methods.keys()),
default='lanczos',
@@ -88,7 +88,7 @@ def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method,
# -------------------------------------------------
# process here
- def pool_resize(fp_im, opt_size, scale_method, centering):
+ def pool_resize(fp_im, opt_size, scale_method):
# Threaded image resize function
try:
pbar.update(1)
@@ -100,7 +100,7 @@ def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method,
log.error(e)
return False
- im = ImageOps.fit(im, opt_size, method=scale_method, centering=centering)
+ #im = ImageOps.fit(im, opt_size, method=scale_method, centering=centering)
if opt_equalize:
im_np = im_utils.pil2np(im)
@@ -117,8 +117,8 @@ def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method,
except:
return False
- centering = centerings[opt_center]
- scale_method = methods[opt_scale_method]
+ #centering = centerings[opt_center]
+ #scale_method = methods[opt_scale_method]
# get list of files to process
fp_ims = glob(join(opt_dir_in, '*.{}'.format(opt_glob_ext)))
@@ -132,7 +132,8 @@ def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method,
# setup multithreading
pbar = tqdm(total=len(fp_ims))
- pool_resize = partial(pool_resize, opt_size=opt_size, scale_method=scale_method, centering=centering)
+ #pool_resize = partial(pool_resize, opt_size=opt_size, scale_method=scale_method, centering=centering)
+ pool_resize = partial(pool_resize, opt_size=opt_size)
#result_list = pool.map(prod_x, data_list)
pool = ThreadPool(opt_threads)
with tqdm(total=len(fp_ims)) as pbar:
diff --git a/megapixels/commands/cv/resize_dataset.py b/megapixels/commands/cv/resize_dataset.py
new file mode 100644
index 00000000..3a6ec15f
--- /dev/null
+++ b/megapixels/commands/cv/resize_dataset.py
@@ -0,0 +1,149 @@
+"""
+Crop images to prepare for training
+"""
+
+import click
+import cv2 as cv
+from PIL import Image, ImageOps, ImageFilter
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+cv_resize_algos = {
+ 'area': cv.INTER_AREA,
+ 'lanco': cv.INTER_LANCZOS4,
+ 'linear': cv.INTER_LINEAR,
+ 'linear_exact': cv.INTER_LINEAR_EXACT,
+ 'nearest': cv.INTER_NEAREST
+}
+"""
+Filter Q-Down Q-Up Speed
+NEAREST ⭐⭐⭐⭐⭐
+BOX ⭐ ⭐⭐⭐⭐
+BILINEAR ⭐ ⭐ ⭐⭐⭐
+HAMMING ⭐⭐ ⭐⭐⭐
+BICUBIC ⭐⭐⭐ ⭐⭐⭐ ⭐⭐
+LANCZOS ⭐⭐⭐⭐ ⭐⭐⭐⭐ ⭐
+"""
+pil_resize_algos = {
+ 'antialias': Image.ANTIALIAS,
+ 'lanczos': Image.LANCZOS,
+ 'bicubic': Image.BICUBIC,
+ 'hamming': Image.HAMMING,
+ 'bileaner': Image.BILINEAR,
+ 'box': Image.BOX,
+ 'nearest': Image.NEAREST
+ }
+
+@click.command()
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+ help='Output directory')
+@click.option('-e', '--ext', 'opt_glob_ext',
+ default='png', type=click.Choice(['jpg', 'png']),
+ help='File glob ext')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(256, 256),
+ help='Output image size max (w,h)')
+@click.option('--interp', 'opt_interp_algo',
+ type=click.Choice(pil_resize_algos.keys()),
+ default='bicubic',
+ help='Interpolation resizing algorithms')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice the input list')
+@click.option('-t', '--threads', 'opt_threads', default=8,
+ help='Number of threads')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+ help='Use glob recursion (slower)')
+@click.pass_context
+def cli(ctx, opt_dataset, opt_data_store, opt_dir_out, opt_glob_ext, opt_size, opt_interp_algo,
+ opt_slice, opt_threads, opt_recursive):
+ """Resize dataset images"""
+
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+ from tqdm import tqdm
+ from multiprocessing.dummy import Pool as ThreadPool
+ from functools import partial
+ import pandas as pd
+ import numpy as np
+
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init
+
+ log = logger_utils.Logger.getLogger()
+
+
+ # -------------------------------------------------
+ # process here
+
+ def pool_resize(fp_in, dir_in, dir_out, im_size, interp_algo):
+ # Threaded image resize function
+ pbar.update(1)
+ try:
+ im = Image.open(fp_in).convert('RGB')
+ im.verify() # throws error if image is corrupt
+ im.thumbnail(im_size, interp_algo)
+ fp_out = fp_in.replace(dir_in, dir_out)
+ file_utils.mkdirs(fp_out)
+ im.save(fp_out, quality=100)
+ except Exception as e:
+ log.warn(f'Could not open: {fp_in}, Error: {e}')
+ return False
+ return True
+
+
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_records = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_records = pd.read_csv(fp_records, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
+ dir_in = data_store.media_images_original()
+
+ # get list of files to process
+ #fp_ims = file_utils.glob_multi(opt_dir_in, ['jpg', 'png'], recursive=opt_recursive)
+ fp_ims = []
+ for ds_record in df_records.itertuples():
+ fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ fp_ims.append(fp_im)
+
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ if not fp_ims:
+ log.error('No images. Try with "--recursive"')
+ return
+ log.info(f'processing {len(fp_ims):,} images')
+
+ # algorithm to use for resizing
+ interp_algo = pil_resize_algos[opt_interp_algo]
+ log.info(f'using {interp_algo} for interpoloation')
+
+ # ensure output dir exists
+ file_utils.mkdirs(opt_dir_out)
+
+ # setup multithreading
+ pbar = tqdm(total=len(fp_ims))
+ # fixed arguments for pool function
+ map_pool_resize = partial(pool_resize, dir_in=dir_in, dir_out=opt_dir_out, im_size=opt_size, interp_algo=interp_algo)
+ #result_list = pool.map(prod_x, data_list) # simple
+ pool = ThreadPool(opt_threads)
+ # start multithreading
+ with tqdm(total=len(fp_ims)) as pbar:
+ results = pool.map(map_pool_resize, fp_ims)
+ # end multithreading
+ pbar.close()
+
+ log.info(f'Resized: {results.count(True)} / {len(fp_ims)} images') \ No newline at end of file
diff --git a/megapixels/commands/datasets/file_record.py b/megapixels/commands/datasets/file_record.py
index d3f790d4..41a5df28 100644
--- a/megapixels/commands/datasets/file_record.py
+++ b/megapixels/commands/datasets/file_record.py
@@ -45,9 +45,11 @@ identity_sources = ['subdir', 'numeric']
help='Identity source key')
@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
help='Use glob recursion (slower)')
+@click.option('--max-depth', 'opt_max_depth', default=None, type=int,
+ help='Max number of images per subdirectory')
@click.pass_context
def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media, opt_slice, opt_threads,
- opt_identity, opt_force, opt_recursive):
+ opt_identity, opt_force, opt_recursive, opt_max_depth):
"""Generates sha256, uuid, and identity index CSV file"""
import sys, os
@@ -59,6 +61,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
import random
import uuid
+ from PIL import Image
import cv2 as cv
import pandas as pd
from tqdm import tqdm
@@ -84,6 +87,26 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
fp_in = opt_fp_in if opt_fp_in is not None else data_store.media_images_original()
log.info(f'Globbing {fp_in}')
fp_ims = file_utils.glob_multi(fp_in, ['jpg', 'png'], recursive=opt_recursive)
+
+ log.info('Found {:,} images'.format(len(fp_ims)))
+ subdir_groups = {}
+ if opt_max_depth:
+ log.debug(f'using max depth: {opt_max_depth}')
+ for fp_im in fp_ims:
+ fpp_im = Path(fp_im)
+
+ subdir = fp_im.split('/')[-2]
+ if not subdir in subdir_groups.keys():
+ subdir_groups[subdir] = []
+ else:
+ subdir_groups[subdir].append(fp_im)
+ # for each subgroup, limit number of files
+ fp_ims = []
+ for subdir_name, items in subdir_groups.items():
+ ims = items[0:opt_max_depth]
+ fp_ims += ims
+
+ log.debug(f'num subdirs: {len(subdir_groups.keys())}')
# fail if none
if not fp_ims:
log.error('No images. Try with "--recursive"')
@@ -93,7 +116,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
log.info('Found {:,} images'.format(len(fp_ims)))
-
# ----------------------------------------------------------------
# multithread process into SHA256
@@ -101,7 +123,14 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
def pool_mapper(fp_im):
pbar.update(1)
- sha256 = file_utils.sha256(fp_im)
+ try:
+ sha256 = file_utils.sha256(fp_im)
+ im = Image.open(fp_im)
+ im.verify() # throws error if bad file
+ assert(im.size[0] > 60 and im.size[1] > 60)
+ except Exception as e:
+ log.warn(f'skipping file: {fp_im}')
+ return None
im = cv.imread(fp_im)
w, h = im.shape[:2][::-1]
file_size_kb = os.stat(fp_im).st_size // 1000
@@ -128,10 +157,11 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
data = []
indentity_count = 0
for pool_map, fp_im in zip(pool_maps, fp_ims):
+ if pool_map is None:
+ log.warn(f'skipping file: {fp_im}')
+ continue # skip error files
fpp_im = Path(fp_im)
subdir = str(fpp_im.parent.relative_to(fp_in))
- #subdir = '' if subdir is '.' else subdir
- log.debug(subdir)
if opt_identity:
subdirs = subdir.split('/')
diff --git a/megapixels/commands/demo/face_3ddfa.py b/megapixels/commands/demo/face_3ddfa.py
index 6182aeb6..90359159 100644
--- a/megapixels/commands/demo/face_3ddfa.py
+++ b/megapixels/commands/demo/face_3ddfa.py
@@ -1,7 +1,7 @@
'''
Combines 3D face mode + rendering
-https://github.com/cleardusk/3DDFA
-https://github.com/YadiraF/face3d
+https://github.com/cleardusk/3DDFA --> 3d landmarks
+https://github.com/YadiraF/face3d --> render 3D with lighting as 2.5d image
'''
import click
@@ -13,8 +13,8 @@ from app.settings import app_cfg as cfg
@click.command()
@click.option('-i', '--input', 'opt_fp_in', default=None, required=True,
help='Image filepath')
-@click.option('-o', '--output', 'opt_fp_out', default=None,
- help='GIF output path')
+@click.option('-o', '--output', 'opt_dir_out', default=None,
+ help='Directory for output files')
@click.option('--size', 'opt_size',
type=(int, int), default=(300, 300),
help='Output image size')
@@ -27,11 +27,13 @@ from app.settings import app_cfg as cfg
@click.option('--size', 'opt_render_dim',
type=(int, int), default=(512, 512),
help='2.5D render image size')
-@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=True,
help='Display detections to debug')
+@click.option('--save/--no-save', 'opt_save', is_flag=True, default=True,
+ help='Save output images/files')
@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
- opt_size, opt_render_dim, opt_force, opt_display):
+def cli(ctx, opt_fp_in, opt_dir_out, opt_gpu, opt_bbox_init,
+ opt_size, opt_render_dim, opt_force, opt_display, opt_save):
"""3D face demo"""
import sys
@@ -58,6 +60,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
import scipy.io as sio
sys.path.append(join(Path.cwd().parent, '3rdparty'))
+ # git clone https://github.com/cleardusk/3DDFA 3rdparty/d3ddfa
# change name of 3DDFA to d3DDFA because can't start with number
from d3DDFA import mobilenet_v1
from d3DDFA.utils.ddfa import ToTensorGjz, NormalizeGjz, str2bool
@@ -70,7 +73,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
from d3DDFA.utils.render import get_depths_image, cget_depths_image, cpncc
from d3DDFA.utils import paf as d3dfa_paf_utils
- # https://github.com/YadiraF/face3d
+ # git clone https://github.com/YadiraF/face3d 3rdparty/face3d
# compile cython module in face3d/mesh/cython/ python setup.py build_ext -i
from face3d.face3d import mesh as face3d_mesh
@@ -82,13 +85,11 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
fpp_in = Path(opt_fp_in)
im = cv.imread(opt_fp_in)
- #im = im_utils.resize(im_orig, width=opt_size[0], height=opt_size[1])
- # im = im_orig.copy()
# ----------------------------------------------------------------------------
# detect face
- face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ face_detector = face_detector.DetectorCVDNN() # -1 for CPU
bboxes = face_detector.detect(im, largest=True)
bbox = bboxes[0]
dim = im.shape[:2][::-1]
@@ -165,7 +166,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
# dense face 3d vertices
vertices = d3dfa_utils.predict_dense(param, roi_box)
vertices_lst.append(vertices)
-
log.info(f'generated 3d data in: {(time.time() - st):.2f}s')
# filepath helper function
@@ -183,28 +183,20 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
sio.savemat(fp_mat_3df, {'vertices': vertices, 'colors': colors, 'triangles': triangles})
# save PAF
- #fp_paf = to_fp(fpp_in, 'jpg', suffix='paf')
- #opt_paf_size = 3 # PAF feature kernel size
- #im_paf = d3dfa_paf_utils.gen_img_paf(img_crop=im_crop, param=param, kernel_size=opt_paf_size)
- #cv.imwrite(fp_paf, im_paf)
+ im_paf = d3dfa_paf_utils.gen_img_paf(img_crop=im_crop, param=param, kernel_size=3)
# save pose image
# P, pose = parse_pose(param) # Camera matrix (without scale), and pose (yaw, pitch, roll, to verify)
- img_pose = draw_utils.plot_pose_box(im, Ps, pts_res)
- fp_pose = to_fp(fpp_in, 'jpg', suffix='pose')
- cv.imwrite(fp_pose, img_pose)
+ im_pose = draw_utils.plot_pose_box(im, Ps, pts_res)
# save depth image
- fp_depth = to_fp(fpp_in, 'png', suffix='depth')
# depths_img = get_depths_image(im, vertices_lst, tri-1) # python version
im_depth = cget_depths_image(im, vertices_lst, triangles - 1) # cython version
- cv.imwrite(fp_depth, im_depth)
# save pncc image
- fp_pose = to_fp(fpp_in, 'png', suffix='pncc')
pncc_feature = cpncc(im, vertices_lst, triangles - 1) # cython version
- cv.imwrite(fp_pose, pncc_feature[:, :, ::-1]) # cv.imwrite will swap RGB -> BGR
+ im_pncc = pncc_feature[:, :, ::-1] # swap BGR
# save .ply
#fp_ply = to_fp(fpp_in, 'ply')
@@ -228,8 +220,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
# save obj
colors = d3dfa_utils.get_colors(im, vertices_orig)
- fp_obj = to_fp(fpp_in, 'obj')
- write_obj_with_colors(fp_obj, vertices_orig, triangles, colors)
#fp_landmarks = to_fp(fpp_in, 'jpg', suffix='3DDFA')
# show_flg?
@@ -276,30 +266,39 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
vertices_proj = face3d_mesh.transform.orthographic_project(vertices_cam)
# -------------------------------------------------------------------------
- # render 2D image
+ # render 2D images
w = h = max(opt_render_dim)
vertices_im = face3d_mesh.transform.to_image(vertices_proj, h, w)
- rendering = face3d_mesh.render.render_colors(vertices_im, triangles, colors_lit, h, w)
-
- cv.imshow('', rendering)
- display_utils.handle_keyboard()
+ im_render = face3d_mesh.render.render_colors(vertices_im, triangles, colors_lit, h, w)
+ im_render = (255* im_render).astype(np.uint8)
+ im_pncc = im_pncc.astype(np.uint8)
+ im_depth = im_depth.astype(np.uint8)
+ im_paf = im_paf.astype(np.uint8)
# ----------------------------------------------------------------------------
# save
- if opt_fp_out:
- # save pose only
- fpp_out = Path(opt_fp_out)
+ if opt_save:
+ fpp_out = Path(opt_dir_out) if opt_dir_out is not None else Path(opt_fp_in).parent
+ fpp_in = Path(opt_fp_in)
+ fp_out = join(fpp_out, f'{fpp_in.stem}_render.png')
+ cv.imwrite(fp_out, im_render)
+
+ fp_out = join(fpp_out, f'{fpp_in.stem}_pose.png')
+ cv.imwrite(fp_out, im_pose)
- fp_out = join(fpp_out.parent, f'{fpp_out.stem}_real{fpp_out.suffix}')
- cv.imwrite(fp_out, im_age_real)
+ fp_out = join(fpp_out, f'{fpp_in.stem}_depth.png')
+ cv.imwrite(fp_out, im_depth)
+
+ fp_out = join(fpp_out, f'{fpp_in.stem}_pncc.png')
+ cv.imwrite(fp_out, im_pncc)
- fp_out = join(fpp_out.parent, f'{fpp_out.stem}_apparent{fpp_out.suffix}')
- cv.imwrite(fp_out, im_age_apparent)
+ fp_out = join(fpp_out, f'{fpp_in.stem}_paf.png')
+ cv.imwrite(fp_out, im_paf)
- fp_out = join(fpp_out.parent, f'{fpp_out.stem}_gender{fpp_out.suffix}')
- cv.imwrite(fp_out, im_age_apparent)
+ fp_out = join(fpp_out, f'{fpp_in.stem}.obj')
+ write_obj_with_colors(fp_out, vertices_orig, triangles, colors)
# ----------------------------------------------------------------------------
@@ -307,8 +306,10 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_bbox_init,
if opt_display:
# show all images here
- cv.imshow('real', im_age_real)
- cv.imshow('apparent', im_age_apparent)
- cv.imshow('gender', im_gender)
+ cv.imshow('3d', im_render)
+ cv.imshow('depth', im_depth)
+ cv.imshow('pncc', im_pncc)
+ cv.imshow('pose', im_pose)
+ cv.imshow('paf', im_paf)
display_utils.handle_keyboard()
diff --git a/megapixels/commands/demo/face_age_gender.py b/megapixels/commands/demo/face_age_gender.py
index c74f1e45..c4f09c13 100644
--- a/megapixels/commands/demo/face_age_gender.py
+++ b/megapixels/commands/demo/face_age_gender.py
@@ -17,7 +17,7 @@ from app.settings import app_cfg as cfg
help='GPU index')
@click.option('-f', '--force', 'opt_force', is_flag=True,
help='Force overwrite file')
-@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=True,
help='Display detections to debug')
@click.pass_context
def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
@@ -52,12 +52,12 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# ----------------------------------------------------------------------------
# detect face
- face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ face_detector = face_detector.DetectorCVDNN()
bboxes = face_detector.detect(im_resized, largest=True)
- bbox = bboxes[0]
+ bbox_norm = bboxes[0]
dim = im_resized.shape[:2][::-1]
- bbox_dim = bbox.to_dim(dim)
- if not bbox:
+ bbox_dim = bbox_norm.to_dim(dim)
+ if not bbox_norm:
log.error('no face detected')
return
else:
@@ -70,21 +70,24 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# real
age_real_predictor = face_age_gender.FaceAgeReal()
st = time.time()
- age_real = age_real_predictor.predict(im_resized, bbox_dim)
+ age_real = age_real_predictor.predict(im_resized, bbox_norm)
log.info(f'age real took: {(time.time()-st)/1000:.5f}s')
# apparent
age_apparent_predictor = face_age_gender.FaceAgeApparent()
st = time.time()
- age_apparent = age_apparent_predictor.predict(im_resized, bbox_dim)
+ age_apparent = age_apparent_predictor.predict(im_resized, bbox_norm)
log.info(f'age apparent took: {(time.time()-st)/1000:.5f}s')
# gender
gender_predictor = face_age_gender.FaceGender()
st = time.time()
- gender = gender_predictor.predict(im_resized, bbox_dim)
+ gender = gender_predictor.predict(im_resized, bbox_norm)
log.info(f'gender took: {(time.time()-st)/1000:.5f}s')
+ # ethnicity
+ # TODO
+
# ----------------------------------------------------------------------------
# output
@@ -99,21 +102,21 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# draw real age
im_age_real = im_resized.copy()
- draw_utils.draw_bbox(im_age_real, bbox_dim)
+ im_age_real = draw_utils.draw_bbox(im_age_real, bbox_norm)
txt = f'{(age_real):.2f}'
- draw_utils.draw_text(im_age_real, bbox_dim.pt_tl, txt)
+ im_age_real = draw_utils.draw_text(im_age_real, bbox_norm.pt_tl, txt)
# apparent age
im_age_apparent = im_resized.copy()
- draw_utils.draw_bbox(im_age_apparent, bbox_dim)
+ im_age_apparent = draw_utils.draw_bbox(im_age_apparent, bbox_norm)
txt = f'{(age_apparent):.2f}'
- draw_utils.draw_text(im_age_apparent, bbox_dim.pt_tl, txt)
+ im_age_apparent = draw_utils.draw_text(im_age_apparent, bbox_norm.pt_tl, txt)
# gender
im_gender = im_resized.copy()
- draw_utils.draw_bbox(im_age_apparent, bbox_dim)
+ im_gender = draw_utils.draw_bbox(im_gender, bbox_norm)
txt = f"M: {gender['m']:.2f}, F: {gender['f']:.2f}"
- draw_utils.draw_text(im_gender, (10, dim[1]-20), txt)
+ im_gender = draw_utils.draw_text(im_gender, (.1, .9), txt)
# ----------------------------------------------------------------------------
diff --git a/megapixels/commands/demo/face_beauty.py b/megapixels/commands/demo/face_beauty.py
index d31c5cee..45643c61 100644
--- a/megapixels/commands/demo/face_beauty.py
+++ b/megapixels/commands/demo/face_beauty.py
@@ -66,10 +66,10 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
bboxes = face_detector.detect(im_resized, largest=True)
- bbox = bboxes[0]
+ bbox_norm = bboxes[0]
dim = im_resized.shape[:2][::-1]
- bbox_dim = bbox.to_dim(dim)
- if not bbox:
+ bbox_dim = bbox_norm.to_dim(dim)
+ if not bbox_norm:
log.error('no face detected')
return
else:
@@ -78,7 +78,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# ----------------------------------------------------------------------------
# beauty
- beauty_score = beauty_predictor.beauty(im_resized, bbox_dim)
+ beauty_score = beauty_predictor.beauty(im_resized, bbox_norm)
# ----------------------------------------------------------------------------
@@ -93,9 +93,9 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# draw 2d landmarks
im_beauty = im_resized.copy()
- draw_utils.draw_bbox(im_beauty, bbox_dim)
+ im_beauty = draw_utils.draw_bbox(im_beauty, bbox_dim)
txt = f'Beauty score: {(100*beauty_score):.2f}'
- draw_utils.draw_text(im_beauty, bbox_dim.pt_tl, txt)
+ im_beauty = draw_utils.draw_text(im_beauty, bbox_dim.pt_tl, txt)
# ----------------------------------------------------------------------------
diff --git a/megapixels/commands/demo/face_detection.py b/megapixels/commands/demo/face_detect.py
index 488cc80d..b92db7cb 100644
--- a/megapixels/commands/demo/face_detection.py
+++ b/megapixels/commands/demo/face_detect.py
@@ -59,68 +59,27 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# ----------------------------------------------------------------------------
# detect face
- face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ face_detector = face_detector.DetectorCVDNN()
bboxes = face_detector.detect(im_resized, largest=True)
- bbox = bboxes[0]
- dim = im_resized.shape[:2][::-1]
- bbox_dim = bbox.to_dim(dim)
- if not bbox:
+ if not bboxes:
log.error('no face detected')
return
-
-
- # ----------------------------------------------------------------------------
- # generate 68 point landmarks using dlib
-
- from app.processors import face_landmarks
- landmark_detector_2d_68 = face_landmarks.Dlib2D_68()
- points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim)
-
-
- # ----------------------------------------------------------------------------
- # generate pose from 68 point 2D landmarks
-
- from app.processors import face_pose
- pose_detector = face_pose.FacePoseDLIB()
- pose_data = pose_detector.pose(points_2d_68, dim)
-
- # ----------------------------------------------------------------------------
- # output
-
- log.info(f'Face coords: {bbox_dim} face')
- log.info(f'pitch: {pose_data["pitch"]}, roll: {pose_data["roll"]}, yaw: {pose_data["yaw"]}')
+ bbox_norm = bboxes[0]
+ dim = im_resized.shape[:2][::-1]
+ bbox_dim = bbox_norm.to_dim(dim)
# ----------------------------------------------------------------------------
# draw
- # draw 2d landmarks
- im_landmarks_2d_68 = im_resized.copy()
- draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68)
- draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim)
-
- # draw pose
- im_pose = im_resized.copy()
- draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points'])
- draw_utils.draw_degrees(im_pose, pose_data)
-
-
- # ----------------------------------------------------------------------------
- # save
-
- if opt_fp_out:
- # save pose only
- cv.imwrite(opt_fp_out, im_pose)
-
+ im_face = im_resized.copy()
+ im_face = draw_utils.draw_bbox(im_face, bbox_norm)
# ----------------------------------------------------------------------------
# display
if opt_display:
-
# show all images here
- cv.imshow('Original', im_resized)
- cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68)
- cv.imshow('Pose', im_pose)
+ cv.imshow('Face', im_face)
display_utils.handle_keyboard() \ No newline at end of file
diff --git a/megapixels/commands/demo/face_landmarks_2d.py b/megapixels/commands/demo/face_landmarks_2d.py
index 22e09297..145a12a6 100644
--- a/megapixels/commands/demo/face_landmarks_2d.py
+++ b/megapixels/commands/demo/face_landmarks_2d.py
@@ -3,7 +3,6 @@ Crop images to prepare for training
"""
import click
-# from PIL import Image, ImageOps, ImageFilter, ImageDraw
from app.settings import types
from app.utils import click_utils
@@ -13,26 +12,14 @@ from app.settings import app_cfg as cfg
@click.command()
@click.option('-i', '--input', 'opt_fp_in', default=None, required=True,
help='Image filepath')
-@click.option('-o', '--output', 'opt_fp_out', default=None,
- help='GIF output path')
@click.option('--size', 'opt_size',
type=(int, int), default=(300, 300),
help='Output image size')
-@click.option('--gif-size', 'opt_gif_size',
- type=(int, int), default=(480, 480),
- help='GIF output size')
-@click.option('--gif-frames', 'opt_gif_frames', default=15,
- help='GIF frames')
-@click.option('-g', '--gpu', 'opt_gpu', default=0,
- help='GPU index')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=True,
help='Display detections to debug')
@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
- opt_size, opt_gif_size, opt_force, opt_display):
- """Generates 3D landmark animations from CSV files"""
+def cli(ctx, opt_fp_in, opt_size, opt_display):
+ """2D 68-point landmarks"""
import sys
import os
@@ -52,12 +39,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
from app.utils import plot_utils
from app.processors import face_detector, face_landmarks
from app.models.data_store import DataStore
-
- # TOOD add selective testing
- opt_run_pose = True
- opt_run_2d_68 = True
- opt_run_3d_68 = True
- opt_run_3d_68 = True
# -------------------------------------------------
@@ -66,7 +47,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
log = logger_utils.Logger.getLogger()
- # load image
im = cv.imread(opt_fp_in)
im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
@@ -74,146 +54,41 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
# ----------------------------------------------------------------------------
# detect face
- face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ face_detector = face_detector.DetectorCVDNN()
log.info('detecting face...')
st = time.time()
bboxes = face_detector.detect(im_resized, largest=True)
- bbox = bboxes[0]
+ bbox_norm = bboxes[0]
dim = im_resized.shape[:2][::-1]
- bbox_dim = bbox.to_dim(dim)
- if not bbox:
+ bbox_dim = bbox_norm.to_dim(dim)
+ if not bbox_norm:
log.error('no face detected')
return
else:
log.info(f'Detected face in {(time.time() - st):.2f}s')
- log.info('')
-
-
- # ----------------------------------------------------------------------------
- # detect 3D landmarks
-
- log.info('loading 3D landmark generator files...')
- landmark_detector_3d_68 = face_landmarks.FaceAlignment3D_68(gpu=opt_gpu) # -1 for CPU
- log.info('generating 3D landmarks...')
- st = time.time()
- points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_dim.to_xyxy())
- log.info(f'generated 3D landmarks in {(time.time() - st):.2f}s')
- log.info('')
-
-
- # ----------------------------------------------------------------------------
- # generate 3D GIF animation
- log.info('generating 3D animation...')
- if not opt_fp_out:
- fpp_im = Path(opt_fp_in)
- fp_out = join(fpp_im.parent, f'{fpp_im.stem}_anim.gif')
- else:
- fp_out = opt_fp_out
- st = time.time()
- plot_utils.generate_3d_landmark_anim(np.array(points_3d_68), fp_out,
- size=opt_gif_size, num_frames=opt_gif_frames)
- log.info(f'Generated animation in {(time.time() - st):.2f}s')
- log.info(f'Saved to: {fp_out}')
- log.info('')
-
-
- # ----------------------------------------------------------------------------
- # generate face vectors, only to test if feature extraction works
-
- log.info('initialize face recognition model...')
- from app.processors import face_recognition
- face_rec = face_recognition.RecognitionDLIB()
- st = time.time()
- log.info('generating face vector...')
- vec = face_rec.vec(im_resized, bbox_dim)
- log.info(f'generated face vector in {(time.time() - st):.2f}s')
- log.info('')
-
# ----------------------------------------------------------------------------
# generate 68 point landmarks using dlib
log.info('initializing face landmarks 68 dlib...')
- from app.processors import face_landmarks
landmark_detector_2d_68 = face_landmarks.Dlib2D_68()
log.info('generating 2D 68PT landmarks...')
st = time.time()
- points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim)
+ points_norm = landmark_detector_2d_68.landmarks(im_resized, bbox_norm)
log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s')
- log.info('')
# ----------------------------------------------------------------------------
- # generate pose from 68 point 2D landmarks
-
- if opt_run_pose:
- log.info('initialize pose...')
- from app.processors import face_pose
- pose_detector = face_pose.FacePoseDLIB()
- log.info('generating pose...')
- st = time.time()
- pose_data = pose_detector.pose(points_2d_68, dim)
- log.info(f'generated pose {(time.time() - st):.2f}s')
- log.info('')
-
-
- # x
-
-
-
# display
+
if opt_display:
-
- # draw bbox
-
- # draw 3d landmarks
- im_landmarks_3d_68 = im_resized.copy()
- draw_utils.draw_landmarks3D(im_landmarks_3d_68, points_3d_68)
- draw_utils.draw_bbox(im_landmarks_3d_68, bbox_dim)
# draw 2d landmarks
- im_landmarks_2d_68 = im_resized.copy()
- draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68)
- draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim)
-
- # draw pose
- if opt_run_pose:
- im_pose = im_resized.copy()
- draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points'])
- draw_utils.draw_degrees(im_pose, pose_data)
-
- # draw animated GIF
- im = Image.open(opt_fp_out)
- im_frames = []
- duration = im.info['duration']
- try:
- while True:
- im.seek(len(im_frames))
- mypalette = im.getpalette()
- im.putpalette(mypalette)
- im_jpg = Image.new("RGB", im.size)
- im_jpg.paste(im)
- im_np = im_utils.pil2np(im_jpg.copy())
- im_frames.append(im_np)
- except EOFError:
- pass # end of GIF sequence
-
- n_frames = len(im_frames)
- frame_number = 0
+ im_lmarks = im_resized.copy()
+ im_lmarks = draw_utils.draw_bbox(im_lmarks, bbox_norm)
+ im_lmarks = draw_utils.draw_landmarks2D(im_lmarks, points_norm)
- while True:
- # show all images here
- cv.imshow('Original', im_resized)
- cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68)
- cv.imshow('3D 68PT Landmarks', im_landmarks_3d_68)
- cv.imshow('Pose', im_pose)
- cv.imshow('3D 68pt GIF', im_frames[frame_number])
- frame_number = (frame_number + 1) % n_frames
- k = cv.waitKey(duration) & 0xFF
- if k == 27 or k == ord('q'): # ESC
- cv.destroyAllWindows()
- sys.exit()
- elif k != 255:
- # any key to continue
- break \ No newline at end of file
+ # show all images here
+ cv.imshow('2D 68PT Landmarks', im_lmarks)
+ display_utils.handle_keyboard() \ No newline at end of file
diff --git a/megapixels/commands/demo/face_landmarks_3d.py b/megapixels/commands/demo/face_landmarks_3d.py
index 22e09297..ed5a00d5 100644
--- a/megapixels/commands/demo/face_landmarks_3d.py
+++ b/megapixels/commands/demo/face_landmarks_3d.py
@@ -3,7 +3,6 @@ Crop images to prepare for training
"""
import click
-# from PIL import Image, ImageOps, ImageFilter, ImageDraw
from app.settings import types
from app.utils import click_utils
@@ -27,7 +26,7 @@ from app.settings import app_cfg as cfg
help='GPU index')
@click.option('-f', '--force', 'opt_force', is_flag=True,
help='Force overwrite file')
-@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=True,
help='Display detections to debug')
@click.pass_context
def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
@@ -52,12 +51,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
from app.utils import plot_utils
from app.processors import face_detector, face_landmarks
from app.models.data_store import DataStore
-
- # TOOD add selective testing
- opt_run_pose = True
- opt_run_2d_68 = True
- opt_run_3d_68 = True
- opt_run_3d_68 = True
# -------------------------------------------------
@@ -74,14 +67,14 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
# ----------------------------------------------------------------------------
# detect face
- face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ face_detector = face_detector.DetectorCVDNN()
log.info('detecting face...')
st = time.time()
bboxes = face_detector.detect(im_resized, largest=True)
- bbox = bboxes[0]
+ bbox_norm = bboxes[0]
dim = im_resized.shape[:2][::-1]
- bbox_dim = bbox.to_dim(dim)
- if not bbox:
+ bbox_dim = bbox_norm.to_dim(dim)
+ if not bbox_norm:
log.error('no face detected')
return
else:
@@ -96,7 +89,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
landmark_detector_3d_68 = face_landmarks.FaceAlignment3D_68(gpu=opt_gpu) # -1 for CPU
log.info('generating 3D landmarks...')
st = time.time()
- points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_dim.to_xyxy())
+ points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_norm)
log.info(f'generated 3D landmarks in {(time.time() - st):.2f}s')
log.info('')
@@ -119,19 +112,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
# ----------------------------------------------------------------------------
- # generate face vectors, only to test if feature extraction works
-
- log.info('initialize face recognition model...')
- from app.processors import face_recognition
- face_rec = face_recognition.RecognitionDLIB()
- st = time.time()
- log.info('generating face vector...')
- vec = face_rec.vec(im_resized, bbox_dim)
- log.info(f'generated face vector in {(time.time() - st):.2f}s')
- log.info('')
-
-
- # ----------------------------------------------------------------------------
# generate 68 point landmarks using dlib
log.info('initializing face landmarks 68 dlib...')
@@ -139,54 +119,25 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
landmark_detector_2d_68 = face_landmarks.Dlib2D_68()
log.info('generating 2D 68PT landmarks...')
st = time.time()
- points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim)
+ points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_norm)
log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s')
log.info('')
- # ----------------------------------------------------------------------------
- # generate pose from 68 point 2D landmarks
-
- if opt_run_pose:
- log.info('initialize pose...')
- from app.processors import face_pose
- pose_detector = face_pose.FacePoseDLIB()
- log.info('generating pose...')
- st = time.time()
- pose_data = pose_detector.pose(points_2d_68, dim)
- log.info(f'generated pose {(time.time() - st):.2f}s')
- log.info('')
-
-
- # x
-
-
-
# display
if opt_display:
- # draw bbox
-
- # draw 3d landmarks
- im_landmarks_3d_68 = im_resized.copy()
- draw_utils.draw_landmarks3D(im_landmarks_3d_68, points_3d_68)
- draw_utils.draw_bbox(im_landmarks_3d_68, bbox_dim)
-
- # draw 2d landmarks
- im_landmarks_2d_68 = im_resized.copy()
- draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68)
- draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim)
-
- # draw pose
- if opt_run_pose:
- im_pose = im_resized.copy()
- draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points'])
- draw_utils.draw_degrees(im_pose, pose_data)
+ # draw landmarks
+ im_lmarks = im_resized.copy()
+ im_lmarks = draw_utils.draw_bbox(im_lmarks, bbox_norm)
+ im_lmarks = draw_utils.draw_landmarks2D(im_lmarks, points_2d_68, radius=1, color=(0,0,255))
+ im_lmarks = draw_utils.draw_landmarks3D(im_lmarks, points_3d_68, radius=3, color=(0,255,0))
# draw animated GIF
- im = Image.open(opt_fp_out)
+ im = Image.open(fp_out)
im_frames = []
duration = im.info['duration']
+
try:
while True:
im.seek(len(im_frames))
@@ -204,10 +155,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_gif_frames,
while True:
# show all images here
- cv.imshow('Original', im_resized)
- cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68)
- cv.imshow('3D 68PT Landmarks', im_landmarks_3d_68)
- cv.imshow('Pose', im_pose)
+ cv.imshow('2D/3D 68PT Landmarks', im_lmarks)
cv.imshow('3D 68pt GIF', im_frames[frame_number])
frame_number = (frame_number + 1) % n_frames
k = cv.waitKey(duration) & 0xFF
diff --git a/megapixels/commands/demo/face_pose.py b/megapixels/commands/demo/face_pose.py
index 3918adac..48214e0d 100644
--- a/megapixels/commands/demo/face_pose.py
+++ b/megapixels/commands/demo/face_pose.py
@@ -22,7 +22,7 @@ from app.settings import app_cfg as cfg
help='GPU index')
@click.option('-f', '--force', 'opt_force', is_flag=True,
help='Force overwrite file')
-@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=True,
help='Display detections to debug')
@click.pass_context
def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
@@ -61,12 +61,12 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# ----------------------------------------------------------------------------
# detect face
- face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ face_detector = face_detector.DetectorCVDNN()
bboxes = face_detector.detect(im_resized, largest=True)
- bbox = bboxes[0]
+ bbox_norm = bboxes[0]
dim = im_resized.shape[:2][::-1]
- bbox_dim = bbox.to_dim(dim)
- if not bbox:
+ bbox_dim = bbox_norm.to_dim(dim)
+ if not bbox_norm:
log.error('no face detected')
return
@@ -76,7 +76,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
from app.processors import face_landmarks
landmark_detector_2d_68 = face_landmarks.Dlib2D_68()
- points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim)
+ points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_norm)
# ----------------------------------------------------------------------------
@@ -97,14 +97,14 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# draw
# draw 2d landmarks
- im_landmarks_2d_68 = im_resized.copy()
- draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68)
- draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim)
+ im_landmarks = im_resized.copy()
+ im_landmarks = draw_utils.draw_landmarks2D(im_landmarks, points_2d_68)
+ im_landmarks = draw_utils.draw_bbox(im_landmarks, bbox_norm)
# draw pose
im_pose = im_resized.copy()
- draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points'])
- draw_utils.draw_degrees(im_pose, pose_data)
+ im_pose = draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points'])
+ im_pose = draw_utils.draw_degrees(im_pose, pose_data)
# ----------------------------------------------------------------------------
@@ -120,9 +120,8 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
if opt_display:
-
# show all images here
cv.imshow('Original', im_resized)
- cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68)
+ cv.imshow('2D 68PT Landmarks', im_landmarks)
cv.imshow('Pose', im_pose)
display_utils.handle_keyboard() \ No newline at end of file
diff --git a/megapixels/commands/demo/face_search.py b/megapixels/commands/demo/face_search.py
index ca0b8016..f551cafd 100644
--- a/megapixels/commands/demo/face_search.py
+++ b/megapixels/commands/demo/face_search.py
@@ -13,7 +13,7 @@ log = Logger.getLogger()
help='File to lookup')
@click.option('--data_store', 'opt_data_store',
type=cfg.DataStoreVar,
- default=click_utils.get_default(types.DataStore.SSD),
+ default=click_utils.get_default(types.DataStore.HDD),
show_default=True,
help=click_utils.show_help(types.DataStore))
@click.option('--dataset', 'opt_dataset',
@@ -39,62 +39,65 @@ def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_results, opt_gpu):
import cv2 as cv
from tqdm import tqdm
import imutils
+ from PIL import Image, ImageOps
- from app.utils import file_utils, im_utils
+ from app.utils import file_utils, im_utils, display_utils, draw_utils
from app.models.data_store import DataStore
from app.processors import face_detector
- from app.processors import face_recognition
+ from app.processors import face_extractor
log = Logger.getLogger()
+
# init dataset
dataset = Dataset(opt_data_store, opt_dataset)
- dataset.load_face_vectors()
- dataset.load_records()
- dataset.load_identities()
+ dataset.load_metadata(types.Metadata.FILE_RECORD)
+ dataset.load_metadata(types.Metadata.FACE_VECTOR)
+ dataset.load_metadata(types.Metadata.FACE_ROI)
+ # dataset.load_metadata(types.Metadata.IDENTITY)
# init face detection
- detector = face_detector.DetectorDLIBHOG()
+ detector = face_detector.DetectorCVDNN()
- # init face recognition
- recognition = face_recognition.RecognitionDLIB(gpu=opt_gpu)
+ # init face extractor
+ extractor = face_extractor.ExtractorVGG()
# load query image
im_query = cv.imread(opt_fp_in)
# get detection as BBox object
bboxes = detector.detect(im_query, largest=True)
- bbox = bboxes[0]
+ bbox_norm = bboxes[0]
dim = im_query.shape[:2][::-1]
- bbox = bbox.to_dim(dim) # convert back to real dimensions
+ bbox_dim = bbox_norm.to_dim(dim) # convert back to real dimensions
- if not bbox:
+ if not bbox_norm:
log.error('No face detected. Exiting')
return
# extract the face vectors
- vec_query = recognition.vec(im_query, bbox)
+ vec_query = extractor.extract(im_query, bbox_norm)
+ log.debug(f'len query: {len(vec_query)}')
# find matches
image_records = dataset.find_matches(vec_query, n_results=opt_results)
# summary
+ im_query = draw_utils.draw_bbox(im_query, bbox_norm, stroke_weight=8)
ims_match = [im_query]
for image_record in image_records:
image_record.summarize()
log.info(f'{image_record.filepath}')
im_match = cv.imread(image_record.filepath)
+
+ im_match_pil = Image.open(image_record.filepath).convert('RGB')
+ # bbox =
ims_match.append(im_match)
+ # make montages of most similar faces
montages = imutils.build_montages(ims_match, (256, 256), (3,2))
+ # display
for i, montage in enumerate(montages):
- cv.imshow(f'{i}', montage)
- # cv gui
- while True:
- k = cv.waitKey(1) & 0xFF
- if k == 27 or k == ord('q'): # ESC
- cv.destroyAllWindows()
- sys.exit()
- elif k != 255:
- # any key to continue
- break
+ cv.imshow(f'{opt_dataset.name.upper()}: page {i}', montage)
+
+ display_utils.handle_keyboard()
diff --git a/megapixels/commands/demo/face_vector.py b/megapixels/commands/demo/face_vector.py
index 3ff68001..c7b5ef2e 100644
--- a/megapixels/commands/demo/face_vector.py
+++ b/megapixels/commands/demo/face_vector.py
@@ -1,9 +1,8 @@
"""
-Crop images to prepare for training
+Tests if the feature vector generator works
"""
import click
-# from PIL import Image, ImageOps, ImageFilter, ImageDraw
from app.settings import types
from app.utils import click_utils
@@ -14,11 +13,11 @@ from app.settings import app_cfg as cfg
@click.option('-i', '--input', 'opt_fp_in', default=None, required=True,
help='Image filepath')
@click.option('--size', 'opt_size',
- type=(int, int), default=(300, 300),
+ type=(int, int), default=cfg.DEFAULT_SIZE_FACE_DETECT,
help='Output image size')
@click.option('-g', '--gpu', 'opt_gpu', default=0,
help='GPU index')
-@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=True,
help='Display detections to debug')
@click.pass_context
def cli(ctx, opt_fp_in, opt_gpu, opt_size, opt_display):
@@ -54,12 +53,12 @@ def cli(ctx, opt_fp_in, opt_gpu, opt_size, opt_display):
# ----------------------------------------------------------------------------
# detect face
- face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ face_detector = face_detector.DetectorCVDNN() # -1 for CPU
bboxes = face_detector.detect(im_resized, largest=True)
- bbox = bboxes[0]
+ bbox_norm = bboxes[0]
dim = im_resized.shape[:2][::-1]
- bbox_dim = bbox.to_dim(dim)
- if not bbox:
+ bbox_dim = bbox_norm.to_dim(dim)
+ if not bbox_norm:
log.error('no face detected')
return
@@ -67,14 +66,13 @@ def cli(ctx, opt_fp_in, opt_gpu, opt_size, opt_display):
# ----------------------------------------------------------------------------
# generate face vectors, only to test if feature extraction works
- from app.processors import face_recognition
- facerec = face_recognition.RecognitionDLIB()
- vec = facerec.vec(im_resized, bbox_dim)
- vec_flat = facerec.flatten(vec)
- log.info(f'generated vector. showing vec[0:10]:')
- log.info(f'\n{vec_flat}')
+ from app.processors import face_extractor
+ extractor = face_extractor.ExtractorVGG()
+ vec = extractor.extract(im_resized, bbox_norm)
+ vec_str = extractor.to_str(vec)
+ log.info(f'\n{vec_str}')
if opt_display:
- draw_utils.draw_bbox(im_resized, bbox_dim)
+ im_resized = draw_utils.draw_bbox(im_resized, bbox_dim)
cv.imshow('Original', im_resized)
display_utils.handle_keyboard() \ No newline at end of file