Diffstat (limited to 'megapixels/commands/processor/face_landmark_2d_68.py')
| -rw-r--r-- | megapixels/commands/processor/face_landmark_2d_68.py | 150 |
1 file changed, 150 insertions, 0 deletions
diff --git a/megapixels/commands/processor/face_landmark_2d_68.py b/megapixels/commands/processor/face_landmark_2d_68.py
new file mode 100644
index 00000000..c6978a40
--- /dev/null
+++ b/megapixels/commands/processor/face_landmark_2d_68.py
@@ -0,0 +1,150 @@
+"""
+Creates 2D 68-point face landmarks from face ROIs and saves them to a metadata CSV.
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+  help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+  help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+  help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+  type=cfg.DataStoreVar,
+  default=click_utils.get_default(types.DataStore.HDD),
+  show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+  type=cfg.DatasetVar,
+  required=True,
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('-d', '--detector', 'opt_detector_type',
+  type=cfg.FaceLandmark2D_68Var,
+  default=click_utils.get_default(types.FaceLandmark2D_68.DLIB),
+  help=click_utils.show_help(types.FaceLandmark2D_68))
+@click.option('--size', 'opt_size',
+  type=(int, int), default=(300, 300),
+  help='Output image size')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+  help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+  help='Force overwrite file')
+@click.option('--display', 'opt_display', is_flag=True,
+  help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type,
+  opt_size, opt_slice, opt_force, opt_display):
+  """Creates 2D 68-point landmarks"""
+
+  import sys
+  import os
+  from os.path import join
+  from pathlib import Path
+  from glob import glob
+
+  from tqdm import tqdm
+  import numpy as np
+  import cv2 as cv
+  import pandas as pd
+
+  from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+  from app.processors import face_landmarks
+  from app.models.data_store import DataStore
+  from app.models.bbox import BBox
+
+  # -------------------------------------------------------------------------
+  # init here
+
+  log = logger_utils.Logger.getLogger()
+  # init filepaths
+  data_store = DataStore(opt_data_store, opt_dataset)
+  # set file output path
+  metadata_type = types.Metadata.FACE_LANDMARK_2D_68
+  fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out
+  if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+    return
+
+  # init face landmark processors
+  if opt_detector_type == types.FaceLandmark2D_68.DLIB:
+    # use dlib 68 point detector
+    landmark_detector = face_landmarks.Dlib2D_68()
+  elif opt_detector_type == types.FaceLandmark2D_68.FACE_ALIGNMENT:
+    # use face_alignment 68 point detector
+    landmark_detector = face_landmarks.FaceAlignment2D_68()
+  else:
+    log.error('{} not yet implemented'.format(opt_detector_type.name))
+    return
+
+  log.info(f'Using landmark detector: {opt_detector_type.name}')
+
+  # -------------------------------------------------------------------------
+  # load filepath data
+  fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+  df_record = pd.read_csv(fp_record).set_index('index')
+  # load ROI data
+  fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+  df_roi = pd.read_csv(fp_roi).set_index('index')
+  # slice if you want
+  if opt_slice:
+    df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+  # group by image index (speedup if multiple faces per image)
+  df_img_groups = df_roi.groupby('record_index')
+  log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+  # store landmarks in list
+  results = []
+
+  # -------------------------------------------------------------------------
+  # iterate groups with file/record index as key
+
+  for record_index, df_img_group in tqdm(df_img_groups):
+
+    # access the file_record row (pandas Series)
+    file_record = df_record.iloc[record_index]
+
+    # load image
+    fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext)
+    im = cv.imread(fp_im)
+    im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+    dim = im_resized.shape[:2][::-1]
+
+    # iterate ROIs in this image
+    for roi_index, df_img in df_img_group.iterrows():
+
+      # find landmarks
+      x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h  # normalized values
+      #dim = (file_record.width, file_record.height)  # original w,h
+      bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+      points = landmark_detector.landmarks(im_resized, bbox)
+      points_norm = landmark_detector.normalize(points, dim)
+      points_str = landmark_detector.to_str(points_norm)
+
+      # display if optioned
+      if opt_display:
+        dst = im_resized.copy()
+        draw_utils.draw_landmarks2D(dst, points)
+        draw_utils.draw_bbox(dst, bbox)
+        cv.imshow('', dst)
+        display_utils.handle_keyboard()
+
+      # add to results for CSV
+      results.append({'vec': points_str, 'roi_index': roi_index})
+
+
+  # create DataFrame and save to CSV
+  file_utils.mkdirs(fp_out)
+  df = pd.DataFrame.from_dict(results)
+  df.index.name = 'index'
+  df.to_csv(fp_out)
+
+  # save script
+  file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out))
\ No newline at end of file
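
For readers of the diff, here is a minimal, self-contained sketch of the coordinate convention the script appears to use: the face ROI CSV stores x, y, w, h as normalized (0-1) values, BBox.from_xywh(...).to_dim(dim) presumably scales them to pixel coordinates on the resized image, and the detected 68 landmark points are normalized back to 0-1 before being serialized into the 'vec' column. The helper functions below are illustrative stand-ins, not the project's actual BBox or landmark detector API.

  # Illustrative sketch only: stand-ins for BBox.from_xywh(...).to_dim(...) and
  # landmark_detector.normalize(...), assuming 0-1 normalized ROI coordinates.
  import numpy as np

  def roi_to_pixel_bbox(x, y, w, h, dim):
    # scale a normalized x,y,w,h ROI to pixel coords for an image of size dim=(width, height)
    width, height = dim
    return (int(x * width), int(y * height), int(w * width), int(h * height))

  def normalize_landmarks(points, dim):
    # map pixel-space (N, 2) landmark points back to 0-1 relative to dim=(width, height)
    return np.asarray(points, dtype=np.float32) / np.asarray(dim, dtype=np.float32)

  # example: a face ROI covering the center quarter of a 300x300 resized image
  dim = (300, 300)
  bbox_px = roi_to_pixel_bbox(0.25, 0.25, 0.5, 0.5, dim)  # -> (75, 75, 150, 150)
  pts_px = [(110, 120), (190, 120), (150, 200)]           # placeholder pixel-space landmarks
  pts_norm = normalize_landmarks(pts_px, dim)             # values in 0-1, ready to serialize
  print(bbox_px, pts_norm.round(3).tolist())

Storing normalized coordinates keeps the output CSV independent of the 300x300 resize used during detection, so the landmarks can later be projected onto the source image at any resolution.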
