author     adamhrv <adam@ahprojects.com>   2019-01-06 17:16:18 +0100
committer  adamhrv <adam@ahprojects.com>   2019-01-06 17:16:18 +0100
commit     4bcb82c0f295d79d3d247252e7e98b2d986ae821 (patch)
tree       a51105698c46ecfcb0a09c5ba294f9d9ffa43e7a /megapixels/commands/cv
parent     2efde746810a0264ad2cf09dc9b003bfcd17a4d5 (diff)
externalize drawing, cleanup
Diffstat (limited to 'megapixels/commands/cv')
-rw-r--r--  megapixels/commands/cv/face_landmark.py                                                        96
-rw-r--r--  megapixels/commands/cv/face_landmark_2d_5.py (renamed from megapixels/commands/cv/face_pose_mt.py)  114
-rw-r--r--  megapixels/commands/cv/face_landmark_2d_68.py                                                  150
-rw-r--r--  megapixels/commands/cv/face_landmark_3d_68.py                                                  144
-rw-r--r--  megapixels/commands/cv/face_pose.py                                                             76
-rw-r--r--  megapixels/commands/cv/face_roi.py                                                              42
-rw-r--r--  megapixels/commands/cv/face_vector.py                                                           10
-rw-r--r--  megapixels/commands/cv/face_vector_mt.py                                                       118
8 files changed, 425 insertions, 325 deletions
diff --git a/megapixels/commands/cv/face_landmark.py b/megapixels/commands/cv/face_landmark.py
deleted file mode 100644
index 03ef8fc2..00000000
--- a/megapixels/commands/cv/face_landmark.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-
-"""
-
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-
-color_filters = {'color': 1, 'gray': 2, 'all': 3}
-
-@click.command()
-@click.option('-i', '--input', 'opt_dirs_in', required=True, multiple=True,
- help='Input directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output CSV')
-@click.option('-e', '--ext', 'opt_ext',
- default='jpg', type=click.Choice(['jpg', 'png']),
- help='File glob ext')
-@click.option('--size', 'opt_size',
- type=(int, int), default=(300, 300),
- help='Output image size')
-@click.option('-g', '--gpu', 'opt_gpu', default=0,
- help='GPU index')
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
- help='Slice list of files')
-@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
- help='Use glob recursion (slower)')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.pass_context
-def cli(ctx, opt_dirs_in, opt_fp_out, opt_ext, opt_size, opt_gpu, opt_slice,
- opt_recursive, opt_force):
- """Converts face imges to 3D landmarks"""
-
- import sys
- import os
- from os.path import join
- from pathlib import Path
- from glob import glob
-
- from tqdm import tqdm
- import numpy as np
- import dlib # must keep a local reference for dlib
- import cv2 as cv
- import pandas as pd
- from face_alignment import FaceAlignment, LandmarksType
- from skimage import io
-
- from app.utils import logger_utils, file_utils
- from app.processors import face_detector
-
- # -------------------------------------------------
- # init here
-
-
- log = logger_utils.Logger.getLogger()
-
- if not opt_force and Path(opt_fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
-
- device = 'cuda' if opt_gpu > -1 else 'cpu'
- fa = FaceAlignment(LandmarksType._3D, flip_input=False, device=device)
-
- # get list of files to process
- fp_ims = []
- for opt_dir_in in opt_dirs_in:
- if opt_recursive:
- fp_glob = join(opt_dir_in, '**/*.{}'.format(opt_ext))
- fp_ims += glob(fp_glob, recursive=True)
- else:
- fp_glob = join(opt_dir_in, '*.{}'.format(opt_ext))
- fp_ims += glob(fp_glob)
- log.debug(fp_glob)
-
-
- if opt_slice:
- fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
- log.debug('processing {:,} files'.format(len(fp_ims)))
-
-
- data = {}
-
- for fp_im in tqdm(fp_ims):
- fpp_im = Path(fp_im)
- im = io.imread(fp_im)
- preds = fa.get_landmarks(im)
- if preds and len(preds) > 0:
- data[fpp_im.name] = preds[0].tolist()
-
- # save date
- file_utils.mkdirs(opt_fp_out)
-
- file_utils.write_json(data, opt_fp_out, verbose=True) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_pose_mt.py b/megapixels/commands/cv/face_landmark_2d_5.py
index 8fef2c2c..40ec6f41 100644
--- a/megapixels/commands/cv/face_pose_mt.py
+++ b/megapixels/commands/cv/face_landmark_2d_5.py
@@ -1,5 +1,5 @@
"""
-Converts ROIs to pose: yaw, roll, pitch
+
"""
import click
@@ -8,6 +8,8 @@ from app.settings import types
from app.utils import click_utils
from app.settings import app_cfg as cfg
+color_filters = {'color': 1, 'gray': 2, 'all': 3}
+
@click.command()
@click.option('-i', '--input', 'opt_fp_in', default=None,
help='Override enum input filename CSV')
@@ -15,9 +17,9 @@ from app.settings import app_cfg as cfg
help='Override enum output filename CSV')
@click.option('-m', '--media', 'opt_dir_media', default=None,
help='Override enum media directory')
-@click.option('--data_store', 'opt_data_store',
+@click.option('--store', 'opt_data_store',
type=cfg.DataStoreVar,
- default=click_utils.get_default(types.DataStore.SSD),
+ default=click_utils.get_default(types.DataStore.HDD),
show_default=True,
help=click_utils.show_help(types.Dataset))
@click.option('--dataset', 'opt_dataset',
@@ -25,6 +27,10 @@ from app.settings import app_cfg as cfg
required=True,
show_default=True,
help=click_utils.show_help(types.Dataset))
+@click.option('-d', '--detector', 'opt_detector_type',
+ type=cfg.FaceLandmark2D_5Var,
+ default=click_utils.get_default(types.FaceLandmark2D_5.DLIB),
+ help=click_utils.show_help(types.FaceLandmark2D_5))
@click.option('--size', 'opt_size',
type=(int, int), default=(300, 300),
help='Output image size')
@@ -35,9 +41,9 @@ from app.settings import app_cfg as cfg
@click.option('-d', '--display', 'opt_display', is_flag=True,
help='Display image for debugging')
@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
- opt_slice, opt_force, opt_display):
- """Converts ROIs to pose: roll, yaw, pitch"""
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type,
+ opt_size, opt_slice, opt_force, opt_display):
+ """Creates 2D 5-point landmarks"""
import sys
import os
@@ -47,33 +53,39 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
from tqdm import tqdm
import numpy as np
- import dlib # must keep a local reference for dlib
import cv2 as cv
import pandas as pd
- from app.models.bbox import BBox
- from app.utils import logger_utils, file_utils, im_utils
- from app.processors.face_landmarks import LandmarksDLIB
- from app.processors.face_pose import FacePoseDLIB
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors import face_landmarks
from app.models.data_store import DataStore
+ from app.models.bbox import BBox
# -------------------------------------------------
# init here
log = logger_utils.Logger.getLogger()
-
- # set data_store
+ # init filepaths
data_store = DataStore(opt_data_store, opt_dataset)
-
- # get filepath out
- fp_out = data_store.metadata(types.Metadata.FACE_POSE) if opt_fp_out is None else opt_fp_out
+ # set file output path
+ metadata_type = types.Metadata.FACE_LANDMARK_2D_5
+ fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out
if not opt_force and Path(fp_out).exists():
log.error('File exists. Use "-f / --force" to overwite')
return
- # init face processors
- face_pose = FacePoseDLIB()
- face_landmarks = LandmarksDLIB()
+ # init face landmark processors
+ if opt_detector_type == types.FaceLandmark2D_5.DLIB:
+ # use dlib 5 point detector
+ landmark_detector = face_landmarks.Dlib2D_5()
+ elif opt_detector_type == types.FaceLandmark2D_5.MTCNN:
+ # use MTCNN 5 point detector
+ landmark_detector = face_landmarks.MTCNN2D_5()
+ else:
+ log.error('{} not yet implemented'.format(opt_detector_type.name))
+ return
+
+ log.info(f'Using landmark detector: {opt_detector_type.name}')
# load filepath data
fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
@@ -88,51 +100,47 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
df_img_groups = df_roi.groupby('record_index')
log.debug('processing {:,} groups'.format(len(df_img_groups)))
- # store poses and convert to DataFrame
- poses = []
+ # store landmarks in list
+ results = []
- # iterate
+ # iterate groups with file/record index as key
for record_index, df_img_group in tqdm(df_img_groups):
- # make fp
+
+ # access file record
ds_record = df_record.iloc[record_index]
+
+ # load image
fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
im = cv.imread(fp_im)
- for roi_id, df_img in df_img_group.iterrows():
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+
+ # iterate image group dataframe with roi index as key
+ for roi_index, df_img in df_img_group.iterrows():
+
# get bbox
x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
- dim = im.shape[:2][::-1]
+ dim = im_resized.shape[:2][::-1]
bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
- # get pose
- landmarks = face_landmarks.landmarks(im, bbox)
- pose_data = face_pose.pose(landmarks, dim, project_points=opt_display)
- pose_degrees = pose_data['degrees'] # only keep the degrees data
- # use the project point data if display flag set
- if opt_display:
- pts_im = pose_data['points_image']
- pts_model = pose_data['points_model']
- pt_nose = pose_data['point_nose']
- dst = im.copy()
- face_pose.draw_pose(dst, pts_im, pts_model, pt_nose)
- face_pose.draw_degrees(dst, pose_degrees)
- # display to cv window
- cv.imshow('', dst)
- while True:
- k = cv.waitKey(1) & 0xFF
- if k == 27 or k == ord('q'): # ESC
- cv.destroyAllWindows()
- sys.exit()
- elif k != 255:
- # any key to continue
- break
+ # get landmark points
+ points = landmark_detector.landmarks(im_resized, bbox)
+ points_norm = landmark_detector.normalize(points, dim)
+ points_flat = landmark_detector.flatten(points_norm)
- # add image index and append to result CSV data
- pose_degrees['record_index'] = record_index
- poses.append(pose_degrees)
+ # display to screen if optioned
+ if opt_display:
+ draw_utils.draw_landmarks2D(im_resized, points)
+ draw_utils.draw_bbox(im_resized, bbox)
+ cv.imshow('', im_resized)
+ display_utils.handle_keyboard()
+ results.append(points_flat)
- # save date
+ # create DataFrame and save to CSV
file_utils.mkdirs(fp_out)
- df = pd.DataFrame.from_dict(poses)
+ df = pd.DataFrame.from_dict(results)
df.index.name = 'index'
- df.to_csv(fp_out) \ No newline at end of file
+ df.to_csv(fp_out)
+
+ # save script
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_landmark_2d_68.py b/megapixels/commands/cv/face_landmark_2d_68.py
new file mode 100644
index 00000000..e24d4b60
--- /dev/null
+++ b/megapixels/commands/cv/face_landmark_2d_68.py
@@ -0,0 +1,150 @@
+"""
+
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-d', '--detector', 'opt_detector_type',
+ type=cfg.FaceLandmark2D_68Var,
+ default=click_utils.get_default(types.FaceLandmark2D_68.DLIB),
+ help=click_utils.show_help(types.FaceLandmark2D_68))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('-d', '--display', 'opt_display', is_flag=True,
+ help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type,
+ opt_size, opt_slice, opt_force, opt_display):
+ """Creates 2D 68-point landmarks"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors import face_landmarks
+ from app.models.data_store import DataStore
+ from app.models.bbox import BBox
+
+ # -------------------------------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+ # init filepaths
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # set file output path
+ metadata_type = types.Metadata.FACE_LANDMARK_2D_68
+ fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # init face landmark processors
+ if opt_detector_type == types.FaceLandmark2D_68.DLIB:
+ # use dlib 68 point detector
+ landmark_detector = face_landmarks.Dlib2D_68()
+ elif opt_detector_type == types.FaceLandmark2D_68.FACE_ALIGNMENT:
+ # use FaceAlignment 68 point detector
+ landmark_detector = face_landmarks.FaceAlignment2D_68()
+ else:
+ log.error('{} not yet implemented'.format(opt_detector_type.name))
+ return
+
+ log.info(f'Using landmark detector: {opt_detector_type.name}')
+
+ # -------------------------------------------------------------------------
+ # load filepath data
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+ # load ROI data
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # slice if you want
+ if opt_slice:
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+ # group by image index (speedup if multiple faces per image)
+ df_img_groups = df_roi.groupby('record_index')
+ log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+ # store landmarks in list
+ results = []
+
+ # -------------------------------------------------------------------------
+ # iterate groups with file/record index as key
+
+ for record_index, df_img_group in tqdm(df_img_groups):
+
+ # access file_record DataSeries
+ file_record = df_record.iloc[record_index]
+
+ # load image
+ fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext)
+ im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ dim = im_resized.shape[:2][::-1]
+
+ # iterate ROIs in this image
+ for roi_index, df_img in df_img_group.iterrows():
+
+ # find landmarks
+ x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h # normalized values
+ #dim = (file_record.width, file_record.height) # original w,h
+ bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+ points = landmark_detector.landmarks(im_resized, bbox)
+ points_norm = landmark_detector.normalize(points, dim)
+ points_flat = landmark_detector.flatten(points_norm)
+
+ # display if optioned
+ if opt_display:
+ dst = im_resized.copy()
+ draw_utils.draw_landmarks2D(dst, points)
+ draw_utils.draw_bbox(dst, bbox)
+ cv.imshow('', dst)
+ display_utils.handle_keyboard()
+
+ # add to results for CSV
+ results.append(points_flat)
+
+
+ # create DataFrame and save to CSV
+ file_utils.mkdirs(fp_out)
+ df = pd.DataFrame.from_dict(results)
+ df.index.name = 'index'
+ df.to_csv(fp_out)
+
+ # save script
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_landmark_3d_68.py b/megapixels/commands/cv/face_landmark_3d_68.py
new file mode 100644
index 00000000..56e60cda
--- /dev/null
+++ b/megapixels/commands/cv/face_landmark_3d_68.py
@@ -0,0 +1,144 @@
+"""
+
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+color_filters = {'color': 1, 'gray': 2, 'all': 3}
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-d', '--detector', 'opt_detector_type',
+ type=cfg.FaceLandmark3D_68Var,
+ default=click_utils.get_default(types.FaceLandmark3D_68.FACE_ALIGNMENT),
+ help=click_utils.show_help(types.FaceLandmark3D_68))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('-d', '--display', 'opt_display', is_flag=True,
+ help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type,
+ opt_size, opt_slice, opt_force, opt_display):
+ """Generate 3D 68-point landmarks"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors import face_landmarks
+ from app.models.data_store import DataStore
+ from app.models.bbox import BBox
+
+ # --------------------------------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+ log.warn('3D landmark points are normalized in a (200, 200, 200) XYZ space')
+ # init filepaths
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # set file output path
+ metadata_type = types.Metadata.FACE_LANDMARK_3D_68
+ fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # init face landmark processors
+ if opt_detector_type == types.FaceLandmark3D_68.FACE_ALIGNMENT:
+ # use FaceAlignment 68 point 3D detector
+ landmark_detector = face_landmarks.FaceAlignment3D_68()
+ else:
+ log.error('{} not yet implemented'.format(opt_detector_type.name))
+ return
+
+ log.info(f'Using landmark detector: {opt_detector_type.name}')
+
+ # -------------------------------------------------------------------------
+ # load data
+
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD) # file_record.csv
+ df_record = pd.read_csv(fp_record).set_index('index')
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI) # face_roi.csv
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ if opt_slice:
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]] # slice if you want
+ df_img_groups = df_roi.groupby('record_index') # groups by image index (load once)
+ log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+ # store landmarks in list
+ results = []
+
+ # iterate groups with file/record index as key
+ for record_index, df_img_group in tqdm(df_img_groups):
+
+ # access file record
+ ds_record = df_record.iloc[record_index]
+
+ # load image
+ fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+
+ # iterate image group dataframe with roi index as key
+ for roi_index, df_img in df_img_group.iterrows():
+
+ # get bbox
+ x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
+ dim = im_resized.shape[:2][::-1]
+ bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+
+ # get landmark points
+ points = landmark_detector.landmarks(im_resized, bbox)
+ # NB: these 3D points are not normalized to the image; they are normalized against the 3D model space
+ points_norm = landmark_detector.normalize(points, dim) # normalized using 200
+ points_flat = landmark_detector.flatten(points)
+
+ # display to screen if optioned
+ if opt_display:
+ draw_utils.draw_landmarks2D(im_resized, points)
+ draw_utils.draw_bbox(im_resized, bbox)
+ cv.imshow('', im_resized)
+ display_utils.handle_keyboard()
+
+ results.append(points_flat)
+
+ # create DataFrame and save to CSV
+ file_utils.mkdirs(fp_out)
+ df = pd.DataFrame.from_dict(results)
+ df.index.name = 'index'
+ df.to_csv(fp_out)
+
+ # save script
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_pose.py b/megapixels/commands/cv/face_pose.py
index 4e35210c..70ea1f30 100644
--- a/megapixels/commands/cv/face_pose.py
+++ b/megapixels/commands/cv/face_pose.py
@@ -1,4 +1,6 @@
"""
+NB: This only works with the DLIB 68-point landmarks.
+
Converts ROIs to pose: yaw, roll, pitch
pitch: looking down or up in yes gesture
roll: tilting head towards shoulder
@@ -6,6 +8,13 @@ yaw: twisting head left to right in no gesture
"""
+"""
+TODO
+- check compatibility with MTCNN 68 point detector
+- improve accuracy by using MTCNN 5-point
+- refer to https://github.com/jerryhouuu/Face-Yaw-Roll-Pitch-from-Pose-Estimation-using-OpenCV/
+"""
+
import click
from app.settings import types
@@ -19,7 +28,7 @@ from app.settings import app_cfg as cfg
help='Override enum output filename CSV')
@click.option('-m', '--media', 'opt_dir_media', default=None,
help='Override enum media directory')
-@click.option('--data_store', 'opt_data_store',
+@click.option('--store', 'opt_data_store',
type=cfg.DataStoreVar,
default=click_utils.get_default(types.DataStore.HDD),
show_default=True,
@@ -56,8 +65,8 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
import pandas as pd
from app.models.bbox import BBox
- from app.utils import logger_utils, file_utils, im_utils
- from app.processors.face_landmarks_2d import LandmarksDLIB
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors.face_landmarks import Dlib2D_68
from app.processors.face_pose import FacePoseDLIB
from app.models.data_store import DataStore
@@ -77,9 +86,11 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
# init face processors
face_pose = FacePoseDLIB()
- face_landmarks = LandmarksDLIB()
+ face_landmarks = Dlib2D_68()
+
+ # -------------------------------------------------
+ # load data
- # load filepath data
fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
df_record = pd.read_csv(fp_record).set_index('index')
# load ROI data
@@ -93,59 +104,60 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
log.debug('processing {:,} groups'.format(len(df_img_groups)))
# store poses and convert to DataFrame
- poses = []
+ results = []
+ # -------------------------------------------------
# iterate groups with file/record index as key
for record_index, df_img_group in tqdm(df_img_groups):
- # make fp
- ds_record = df_record.iloc[record_index]
- fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+
+ # access the file_record
+ file_record = df_record.iloc[record_index] # pandas Series
+
+ # load image
+ fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext)
im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+
# iterate image group dataframe with roi index as key
for roi_index, df_img in df_img_group.iterrows():
+
# get bbox
x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
- dim = (ds_record.width, ds_record.height)
- #dim = im.shape[:2][::-1]
+ #dim = (file_record.width, file_record.height)
+ dim = im_resized.shape[:2][::-1]
bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+
# get pose
- landmarks = face_landmarks.landmarks(im, bbox)
+ landmarks = face_landmarks.landmarks(im_resized, bbox)
pose_data = face_pose.pose(landmarks, dim)
#pose_degrees = pose_data['degrees'] # only keep the degrees data
#pose_degrees['points_nose'] = pose_data
- # use the project point data if display flag set
+
+ # draw landmarks if optioned
if opt_display:
- dst = im.copy()
- face_pose.draw_pose(dst, pose_data['point_nose'], pose_data['points'])
- face_pose.draw_degrees(dst, pose_data)
- # display to cv window
- cv.imshow('', dst)
- while True:
- k = cv.waitKey(1) & 0xFF
- if k == 27 or k == ord('q'): # ESC
- cv.destroyAllWindows()
- sys.exit()
- elif k != 255:
- # any key to continue
- break
+ draw_utils.draw_pose(im_resized, pose_data['point_nose'], pose_data['points'])
+ draw_utils.draw_degrees(im_resized, pose_data)
+ cv.imshow('', im_resized)
+ display_utils.handle_keyboard()
# add image index and append to result CSV data
pose_data['roi_index'] = roi_index
for k, v in pose_data['points'].items():
pose_data[f'point_{k}_x'] = v[0][0] / dim[0]
pose_data[f'point_{k}_y'] = v[0][1] / dim[1]
+
+ # rearrange data structure for DataFrame
pose_data.pop('points')
pose_data['point_nose_x'] = pose_data['point_nose'][0] / dim[0]
pose_data['point_nose_y'] = pose_data['point_nose'][1] / dim[1]
pose_data.pop('point_nose')
- poses.append(pose_data)
+ results.append(pose_data)
- # create dataframe
+ # create DataFrame and save to CSV
file_utils.mkdirs(fp_out)
- df = pd.DataFrame.from_dict(poses)
- # save date
+ df = pd.DataFrame.from_dict(results)
df.index.name = 'index'
df.to_csv(fp_out)
+
# save script
- cmd_line = ' '.join(sys.argv)
- file_utils.write_text(cmd_line, '{}.sh'.format(fp_out)) \ No newline at end of file
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_roi.py b/megapixels/commands/cv/face_roi.py
index c3c2ac05..6d42924e 100644
--- a/megapixels/commands/cv/face_roi.py
+++ b/megapixels/commands/cv/face_roi.py
@@ -29,7 +29,7 @@ color_filters = {'color': 1, 'gray': 2, 'all': 3}
show_default=True,
help=click_utils.show_help(types.Dataset))
@click.option('--size', 'opt_size',
- type=(int, int), default=(300, 300),
+ type=(int, int), default=(480, 480),
help='Output image size')
@click.option('-d', '--detector', 'opt_detector_type',
type=cfg.FaceDetectNetVar,
@@ -50,7 +50,7 @@ color_filters = {'color': 1, 'gray': 2, 'all': 3}
@click.option('--color', 'opt_color_filter',
type=click.Choice(color_filters.keys()), default='all',
help='Filter to keep color or grayscale images (color = keep color')
-@click.option('--largest/--all-faces', 'opt_largest', is_flag=True, default=True,
+@click.option('--keep', 'opt_largest', type=click.Choice(['largest', 'all']), default='all',
help='Only keep largest face')
@click.option('--zone', 'opt_zone', default=(0.0, 0.0), type=(float, float),
help='Face center must be located within zone region (0.5 = half width/height)')
@@ -72,7 +72,7 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
import cv2 as cv
import pandas as pd
- from app.utils import logger_utils, file_utils, im_utils
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
from app.processors import face_detector
from app.models.data_store import DataStore
@@ -113,13 +113,15 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
# filter out grayscale
color_filter = color_filters[opt_color_filter]
+ # set largest flag, to keep all or only largest
+ opt_largest = opt_largest == 'largest'
data = []
for df_record in tqdm(df_records.itertuples(), total=len(df_records)):
fp_im = data_store.face(str(df_record.subdir), str(df_record.fn), str(df_record.ext))
im = cv.imread(fp_im)
-
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
# filter out color or grayscale iamges
if color_filter != color_filters['all']:
try:
@@ -130,9 +132,10 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
except Exception as e:
log.error('Could not check grayscale: {}'.format(fp_im))
continue
-
+
try:
- bboxes = detector.detect(im, size=opt_size, pyramids=opt_pyramids, largest=opt_largest, zone=opt_zone)
+ bboxes = detector.detect(im_resized, pyramids=opt_pyramids, largest=opt_largest,
+ zone=opt_zone, conf_thresh=opt_conf_thresh)
except Exception as e:
log.error('could not detect: {}'.format(fp_im))
log.error('{}'.format(e))
@@ -150,27 +153,22 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
if len(bboxes) == 0:
log.warn(f'no faces in: {fp_im}')
- # debug display
+ # if display optioned
if opt_display and len(bboxes):
- im_md = im_utils.resize(im, width=min(1200, opt_size[0]))
+ # draw each box
for bbox in bboxes:
- bbox_dim = bbox.to_dim(im_md.shape[:2][::-1])
- cv.rectangle(im_md, bbox_dim.pt_tl, bbox_dim.pt_br, (0,255,0), 3)
- cv.imshow('', im_md)
- while True:
- k = cv.waitKey(1) & 0xFF
- if k == 27 or k == ord('q'): # ESC
- cv.destroyAllWindows()
- sys.exit()
- elif k != 255:
- # any key to continue
- break
+ bbox_dim = bbox.to_dim(im_resized.shape[:2][::-1])
+ draw_utils.draw_bbox(im_resized, bbox_dim)
- # save date
+ # display and wait
+ cv.imshow('', im_resized)
+ display_utils.handle_keyboard()
+
+ # create DataFrame and save to CSV
file_utils.mkdirs(fp_out)
df = pd.DataFrame.from_dict(data)
df.index.name = 'index'
df.to_csv(fp_out)
+
# save script
- cmd_line = ' '.join(sys.argv)
- file_utils.write_text(cmd_line, '{}.sh'.format(fp_out)) \ No newline at end of file
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_vector.py b/megapixels/commands/cv/face_vector.py
index 7c03205c..9251c053 100644
--- a/megapixels/commands/cv/face_vector.py
+++ b/megapixels/commands/cv/face_vector.py
@@ -103,15 +103,17 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
# get face vector
bbox_dim = BBox.from_xywh(x, y, w, h).to_dim(dim) # convert to int real dimensions
# compute vec
- # padding=opt_padding not yet implemented in 19.16 but merged in master
+ # padding=opt_padding not yet implemented in dlib==19.16 but merged in master
vec = facerec.vec(im, bbox_dim, jitters=opt_jitters)
vec_str = ','.join([repr(x) for x in vec]) # convert to string for CSV
vecs.append( {'roi_index': roi_index, 'record_index': record_index, 'vec': vec_str})
- # create dataframe
+ # create DataFrame and save to CSV
df = pd.DataFrame.from_dict(vecs)
df.index.name = 'index'
- # save CSV
file_utils.mkdirs(fp_out)
- df.to_csv(fp_out) \ No newline at end of file
+ df.to_csv(fp_out)
+
+ # save script
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_vector_mt.py b/megapixels/commands/cv/face_vector_mt.py
deleted file mode 100644
index 412f9806..00000000
--- a/megapixels/commands/cv/face_vector_mt.py
+++ /dev/null
@@ -1,118 +0,0 @@
-"""
-Converts ROIs to face vector
-"""
-
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-
-@click.command()
-@click.option('-o', '--output', 'opt_fp_out', default=None,
- help='Override enum output filename CSV')
-@click.option('-m', '--media', 'opt_dir_media', default=None,
- help='Override enum media directory')
-@click.option('--data_store', 'opt_data_store',
- type=cfg.DataStoreVar,
- default=click_utils.get_default(types.DataStore.SSD),
- show_default=True,
- help=click_utils.show_help(types.Dataset))
-@click.option('--dataset', 'opt_dataset',
- type=cfg.DatasetVar,
- required=True,
- show_default=True,
- help=click_utils.show_help(types.Dataset))
-@click.option('--size', 'opt_size',
- type=(int, int), default=(300, 300),
- help='Output image size')
-@click.option('-j', '--jitters', 'opt_jitters', default=cfg.DLIB_FACEREC_JITTERS,
- help='Number of jitters')
-@click.option('-p', '--padding', 'opt_padding', default=cfg.DLIB_FACEREC_PADDING,
- help='Percentage padding')
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
- help='Slice list of files')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.option('-g', '--gpu', 'opt_gpu', default=0,
- help='GPU index')
-@click.pass_context
-def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
- opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding):
- """Converts face ROIs to vectors"""
-
- import sys
- import os
- from os.path import join
- from pathlib import Path
- from glob import glob
-
- from tqdm import tqdm
- import numpy as np
- import dlib # must keep a local reference for dlib
- import cv2 as cv
- import pandas as pd
-
- from app.models.bbox import BBox
- from app.models.data_store import DataStore
- from app.utils import logger_utils, file_utils, im_utils
- from app.processors import face_recognition
-
-
- # -------------------------------------------------
- # init here
-
- log = logger_utils.Logger.getLogger()
- # set data_store
- data_store = DataStore(opt_data_store, opt_dataset)
-
- # get filepath out
- fp_out = data_store.metadata(types.Metadata.FACE_VECTOR) if opt_fp_out is None else opt_fp_out
- if not opt_force and Path(fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
-
- # init face processors
- facerec = face_recognition.RecognitionDLIB()
-
- # load data
- fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
- df_record = pd.read_csv(fp_record).set_index('index')
- fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
- df_roi = pd.read_csv(fp_roi).set_index('index')
-
- if opt_slice:
- df_roi = df_roi[opt_slice[0]:opt_slice[1]]
-
- # -------------------------------------------------
- # process here
- df_img_groups = df_roi.groupby('record_index')
- log.debug('processing {:,} groups'.format(len(df_img_groups)))
-
- vecs = []
- for record_index, df_img_group in tqdm(df_img_groups):
- # make fp
- ds_record = df_record.iloc[record_index]
- fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
- im = cv.imread(fp_im)
- for roi_index, df_img in df_img_group.iterrows():
- # get bbox
- x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
- imw = df_img.image_width
- imh = df_img.image_height
- dim = im.shape[:2][::-1]
- # get face vector
- dim = (imw, imh)
- bbox_dim = BBox.from_xywh(x, y, w, h).to_dim(dim) # convert to int real dimensions
- # compute vec
- # padding=opt_padding not yet implemented in 19.16 but merged in master
- vec = facerec.vec(im, bbox_dim, jitters=opt_jitters)
- vec_str = ','.join([repr(x) for x in vec]) # convert to string for CSV
- vecs.append( {'roi_index': roi_index, 'record_index': record_index, 'vec': vec_str})
-
-
- # save date
- df = pd.DataFrame.from_dict(vecs)
- df.index.name = 'index'
- file_utils.mkdirs(fp_out)
- df.to_csv(fp_out) \ No newline at end of file
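
Note on the "externalize drawing" change: the new draw_utils and display_utils helpers called throughout this commit live under app/utils and are not part of this diff. A minimal sketch of what they might look like, reconstructed from the inline OpenCV code removed from face_pose.py and face_roi.py; the function bodies below are an approximation, not the repository's actual implementation:

# sketch only: assumed to correspond to app/utils/display_utils.py and
# app/utils/draw_utils.py, reconstructed from the removed inline code
import sys
import cv2 as cv

def handle_keyboard():
  """Block until a keypress: ESC or 'q' exits the script, any other key continues."""
  while True:
    k = cv.waitKey(1) & 0xFF
    if k == 27 or k == ord('q'):  # ESC or q
      cv.destroyAllWindows()
      sys.exit()
    elif k != 255:
      break  # any other key: continue to the next image

def draw_bbox(im, bbox, color=(0, 255, 0), stroke=3):
  """Draw a BBox (already scaled to pixel dimensions via to_dim) onto the image in place."""
  cv.rectangle(im, bbox.pt_tl, bbox.pt_br, color, stroke)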