From e06af50389f849be0bfe4fa97d39f4519ef2c711 Mon Sep 17 00:00:00 2001
From: adamhrv
Date: Fri, 18 Jan 2019 11:00:18 +0100
Subject: change to cli_proc

---
 megapixels/app/models/bbox.py | 7 +-
 megapixels/app/models/dataset.py | 27 +-
 megapixels/app/settings/app_cfg.py | 2 +-
 megapixels/app/settings/types.py | 2 +-
 megapixels/cli_cv.py | 36 -
 megapixels/cli_proc.py | 36 +
 megapixels/commands/cv/_old_files_to_face_rois.py | 168 -
 megapixels/commands/cv/cluster.py | 47 -
 megapixels/commands/cv/crop.py | 104 -
 megapixels/commands/cv/csv_to_faces.py | 105 -
 megapixels/commands/cv/csv_to_faces_mt.py | 105 -
 megapixels/commands/cv/face_3ddfa.py | 331 -
 megapixels/commands/cv/face_attributes.py | 136 -
 megapixels/commands/cv/face_frames.py | 82 -
 megapixels/commands/cv/face_landmark_2d_5.py | 146 -
 megapixels/commands/cv/face_landmark_2d_68.py | 150 -
 megapixels/commands/cv/face_landmark_3d_68.py | 147 -
 megapixels/commands/cv/face_pose.py | 164 -
 megapixels/commands/cv/face_roi.py | 187 -
 megapixels/commands/cv/face_vector.py | 133 -
 megapixels/commands/cv/mirror.py | 57 -
 megapixels/commands/cv/resize.py | 150 -
 megapixels/commands/cv/resize_dataset.py | 149 -
 megapixels/commands/cv/videos_to_frames.py | 73 -
 megapixels/commands/datasets/preproc_wiki_imdb.py | 205 +
 megapixels/commands/demo/face_search.py | 42 +-
 .../commands/processor/_old_files_to_face_rois.py | 168 +
 megapixels/commands/processor/cluster.py | 47 +
 megapixels/commands/processor/crop.py | 104 +
 megapixels/commands/processor/csv_to_faces.py | 105 +
 megapixels/commands/processor/csv_to_faces_mt.py | 105 +
 megapixels/commands/processor/face_3ddfa.py | 331 +
 megapixels/commands/processor/face_attributes.py | 136 +
 megapixels/commands/processor/face_frames.py | 82 +
 .../commands/processor/face_landmark_2d_5.py | 146 +
 .../commands/processor/face_landmark_2d_68.py | 150 +
 .../commands/processor/face_landmark_3d_68.py | 147 +
 megapixels/commands/processor/face_pose.py | 164 +
 megapixels/commands/processor/face_roi.py | 187 +
 megapixels/commands/processor/face_vector.py | 133 +
 megapixels/commands/processor/mirror.py | 57 +
 megapixels/commands/processor/resize.py | 150 +
 megapixels/commands/processor/resize_dataset.py | 149 +
 megapixels/commands/processor/videos_to_frames.py | 73 +
 .../notebooks/face_analysis/3d_face_plot.ipynb | 1662 +++-
 .../face_analysis/3d_face_plot_cpdp.ipynb | 2967 +++
 .../face_analysis/face_recognition_vgg.ipynb | 8111 +++++++++++++++++++-
 47 files changed, 15312 insertions(+), 2653 deletions(-)
 delete mode 100644 megapixels/cli_cv.py
 create mode 100644 megapixels/cli_proc.py
 delete mode 100644 megapixels/commands/cv/_old_files_to_face_rois.py
 delete mode 100644 megapixels/commands/cv/cluster.py
 delete mode 100644 megapixels/commands/cv/crop.py
 delete mode 100644 megapixels/commands/cv/csv_to_faces.py
 delete mode 100644 megapixels/commands/cv/csv_to_faces_mt.py
 delete mode 100644 megapixels/commands/cv/face_3ddfa.py
 delete mode 100644 megapixels/commands/cv/face_attributes.py
 delete mode 100644 megapixels/commands/cv/face_frames.py
 delete mode 100644 megapixels/commands/cv/face_landmark_2d_5.py
 delete mode 100644 megapixels/commands/cv/face_landmark_2d_68.py
 delete mode 100644 megapixels/commands/cv/face_landmark_3d_68.py
 delete mode 100644 megapixels/commands/cv/face_pose.py
 delete mode 100644 megapixels/commands/cv/face_roi.py
 delete mode 100644 megapixels/commands/cv/face_vector.py
 delete mode 100644 megapixels/commands/cv/mirror.py
 delete mode 100644 megapixels/commands/cv/resize.py
 delete mode 100644 megapixels/commands/cv/resize_dataset.py
 delete mode 100644 megapixels/commands/cv/videos_to_frames.py
 create mode 100644 megapixels/commands/datasets/preproc_wiki_imdb.py
 create mode 100644 megapixels/commands/processor/_old_files_to_face_rois.py
 create mode 100644 megapixels/commands/processor/cluster.py
 create mode 100644 megapixels/commands/processor/crop.py
 create mode 100644 megapixels/commands/processor/csv_to_faces.py
 create mode 100644 megapixels/commands/processor/csv_to_faces_mt.py
 create mode 100644 megapixels/commands/processor/face_3ddfa.py
 create mode 100644 megapixels/commands/processor/face_attributes.py
 create mode 100644 megapixels/commands/processor/face_frames.py
 create mode 100644 megapixels/commands/processor/face_landmark_2d_5.py
 create mode 100644 megapixels/commands/processor/face_landmark_2d_68.py
 create mode 100644 megapixels/commands/processor/face_landmark_3d_68.py
 create mode 100644 megapixels/commands/processor/face_pose.py
 create mode 100644 megapixels/commands/processor/face_roi.py
 create mode 100644 megapixels/commands/processor/face_vector.py
 create mode 100644 megapixels/commands/processor/mirror.py
 create mode 100644 megapixels/commands/processor/resize.py
 create mode 100644 megapixels/commands/processor/resize_dataset.py
 create mode 100644 megapixels/commands/processor/videos_to_frames.py
 create mode 100644 megapixels/notebooks/face_analysis/3d_face_plot_cpdp.ipynb

diff --git a/megapixels/app/models/bbox.py b/megapixels/app/models/bbox.py
index 608aaaf8..8ecc8971 100644
--- a/megapixels/app/models/bbox.py
+++ b/megapixels/app/models/bbox.py
@@ -252,11 +252,14 @@ class BBox:
   # Create from
 
   @classmethod
-  def from_xywh_norm(cls, x, y, w, h):
+  def from_xywh_norm_dim(cls, x, y, w, h, dim):
     """Converts x, y, w, h to normalized BBox
     :returns BBox
     """
-    return cls(x, y, x + w, y + h)
+    x1, y1 = (x * dim[0], y * dim[1])
+    x2, y2 = (w * dim[0]) + x1, (h * dim[1]) + y1
+    rect = cls.normalize(cls, (x1, y1, x2, y2), dim)
+    return cls(*rect)
 
   @classmethod
   def from_xyxy_dim(cls, x1, y1, x2, y2, dim):
diff --git a/megapixels/app/models/dataset.py b/megapixels/app/models/dataset.py
index 88986873..a7227a70 100644
--- a/megapixels/app/models/dataset.py
+++ b/megapixels/app/models/dataset.py
@@ -9,6 +9,7 @@ import logging
 
 import pandas as pd
 import numpy as np
+import cv2 as cv
 
 from app.settings import app_cfg as cfg
 from app.settings import types
@@ -68,7 +69,7 @@ class Dataset:
         self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
       else:
         self.log.error(f'File not found: {fp_csv}. Exiting.')
-        sys.exit()
+        #sys.exit()
 
   def load_metadata(self, metadata_type):
     if metadata_type == types.Metadata.FILE_RECORD:
@@ -107,7 +108,7 @@ class Dataset:
     ds_roi = df_face_roi.iloc[image_index]
     # create BBox
     dim = (ds_roi.image_width, ds_roi.image_height)
-    bbox = BBox.from_xywh_dim(ds_roi.x, ds_roi.y, ds_roi.w, ds_roi.y, dim)
+    bbox = BBox.from_xywh_dim(ds_roi.x, ds_roi.y, ds_roi.w, ds_roi.h, dim)
     # use the ROI index to get identity index from the identity DataFrame
     df_sha256 = self._metadata[types.Metadata.SHA256]
     ds_sha256 = df_sha256.iloc[image_index]
@@ -169,17 +170,21 @@ class Dataset:
 
     roi_index = self._face_vector_roi_idxs[match_idx]
     ds_roi = df_roi.iloc[roi_index]
     record_idx = int(ds_roi.record_index)
-    ds_record = df_record.iloc[record_idx]
-    self.log.debug(f'find match index: {match_idx}, --> roi_index: {roi_index}')
+    ds_record = df_record.iloc[record_idx]
     fp_im = self.data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
-    s3_url = self.data_store_s3.face(ds_record.uuid)
-    identities = []
-
-    bbox_norm = BBox.from_xywh_norm(ds_roi.x, ds_roi.y, ds_roi.w, ds_roi.w)
+    dim = (ds_record.width, ds_record.height)
+    im = cv.imread(fp_im)
+    dim = im.shape[:2][::-1]
+    self.log.debug(f'dim: {dim}')
+    s3_url = self.data_store_s3.face(ds_record.uuid)
+    bbox_norm = BBox.from_xywh_norm_dim(ds_roi.x, ds_roi.y, ds_roi.w, ds_roi.h, dim)
+    self.log.debug(f'bbox_norm: {bbox_norm}')
+    score = sim_scores[match_idx]
 
     if types.Metadata.IDENTITY in self._metadata.keys():
       ds_id = df_identity.loc[df_identity['identity_key'] == ds_record.identity_key].iloc[0]
+
       identity = Identity(record_idx,
                           name_display=ds_id.name_display,
                           description=ds_id.description,
@@ -189,7 +194,7 @@ class Dataset:
                           num_images=ds_id.num_images)
     else:
       identity = None
-    image_record = ImageRecord(ds_record, fp_im, s3_url, bbox_norm, identity=identity)
+    image_record = ImageRecord(ds_record, fp_im, s3_url, bbox_norm, score, identity=identity)
     image_records.append(image_record)
     return image_records
@@ -222,7 +227,7 @@ class Dataset:
 
 
 class ImageRecord:
-  def __init__(self, ds_record, fp, url, bbox_norm, identity=None):
+  def __init__(self, ds_record, fp, url, bbox_norm, score, identity=None):
     # maybe more other meta will go there
     self.image_index = ds_record.index
     self.sha256 = ds_record.sha256
@@ -232,7 +237,9 @@ class ImageRecord:
     self.height = ds_record.height
     self.url = url
     self.bbox = bbox_norm
+    self.score = score
     self.identity = identity
+
 
 # image records contain ROIs
 # ROIs are linked to identities
diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py
index c256635b..2b10f9f0 100644
--- a/megapixels/app/settings/app_cfg.py
+++ b/megapixels/app/settings/app_cfg.py
@@ -77,7 +77,7 @@ FP_FONT = join(DIR_ASSETS, 'font')
 # -----------------------------------------------------------------------------
 # click chair settings
 # -----------------------------------------------------------------------------
-DIR_COMMANDS_CV = 'commands/cv'
+DIR_COMMANDS_PROC = 'commands/processor'
 DIR_COMMANDS_VIZ = 'commands/visualize'
 DIR_COMMANDS_ADMIN = 'commands/admin'
 DIR_COMMANDS_DATASETS = 'commands/datasets'
diff --git a/megapixels/app/settings/types.py b/megapixels/app/settings/types.py
index 7a34ccc2..208215c2 100644
--- a/megapixels/app/settings/types.py
+++ b/megapixels/app/settings/types.py
@@ -48,7 +48,7 @@ class Metadata(Enum):
 
 class Dataset(Enum):
   LFW, VGG_FACE2, MSCELEB, UCCS, UMD_FACES, SCUT_FBP, UCF_SELFIE, UTK, \
-    CASIA_WEBFACE, AFW, PUBFIG83, HELEN, PIPA, MEGAFACE = range(14)
+
CASIA_WEBFACE, AFW, PUBFIG83, HELEN, PIPA, MEGAFACE, BRAINWASH, IMDB_WIKI = range(16) # --------------------------------------------------------------------- diff --git a/megapixels/cli_cv.py b/megapixels/cli_cv.py deleted file mode 100644 index 86246157..00000000 --- a/megapixels/cli_cv.py +++ /dev/null @@ -1,36 +0,0 @@ -# -------------------------------------------------------- -# add/edit commands in commands/datasets directory -# -------------------------------------------------------- - -import click - -from app.settings import app_cfg as cfg -from app.utils import logger_utils -from app.models.click_factory import ClickSimple - -# click cli factory -cc = ClickSimple.create(cfg.DIR_COMMANDS_CV) - -# -------------------------------------------------------- -# CLI -# -------------------------------------------------------- -@click.group(cls=cc, chain=False) -@click.option('-v', '--verbose', 'verbosity', count=True, default=4, - show_default=True, - help='Verbosity: -v DEBUG, -vv INFO, -vvv WARN, -vvvv ERROR, -vvvvv CRITICAL') -@click.pass_context -def cli(ctx, **kwargs): - """\033[1m\033[94mMegaPixels: Dataset Image Scripts\033[0m - """ - ctx.opts = {} - # init logger - logger_utils.Logger.create(verbosity=kwargs['verbosity']) - - - -# -------------------------------------------------------- -# Entrypoint -# -------------------------------------------------------- -if __name__ == '__main__': - cli() - diff --git a/megapixels/cli_proc.py b/megapixels/cli_proc.py new file mode 100644 index 00000000..74031bb4 --- /dev/null +++ b/megapixels/cli_proc.py @@ -0,0 +1,36 @@ +# -------------------------------------------------------- +# add/edit commands in commands/datasets directory +# -------------------------------------------------------- + +import click + +from app.settings import app_cfg as cfg +from app.utils import logger_utils +from app.models.click_factory import ClickSimple + +# click cli factory +cc = ClickSimple.create(cfg.DIR_COMMANDS_PROC) + +# -------------------------------------------------------- +# CLI +# -------------------------------------------------------- +@click.group(cls=cc, chain=False) +@click.option('-v', '--verbose', 'verbosity', count=True, default=4, + show_default=True, + help='Verbosity: -v DEBUG, -vv INFO, -vvv WARN, -vvvv ERROR, -vvvvv CRITICAL') +@click.pass_context +def cli(ctx, **kwargs): + """\033[1m\033[94mMegaPixels: Image Processor Scripts\033[0m + """ + ctx.opts = {} + # init logger + logger_utils.Logger.create(verbosity=kwargs['verbosity']) + + + +# -------------------------------------------------------- +# Entrypoint +# -------------------------------------------------------- +if __name__ == '__main__': + cli() + diff --git a/megapixels/commands/cv/_old_files_to_face_rois.py b/megapixels/commands/cv/_old_files_to_face_rois.py deleted file mode 100644 index d92cbd74..00000000 --- a/megapixels/commands/cv/_old_files_to_face_rois.py +++ /dev/null @@ -1,168 +0,0 @@ -""" -Crop images to prepare for training -""" - -import click -# from PIL import Image, ImageOps, ImageFilter, ImageDraw - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - -color_filters = {'color': 1, 'gray': 2, 'all': 3} - -@click.command() -@click.option('-i', '--input', 'opt_fp_files', required=True, - help='Input file meta CSV') -@click.option('-o', '--output', 'opt_fp_out', required=True, - help='Output CSV') -@click.option('-e', '--ext', 'opt_ext', - default='jpg', type=click.Choice(['jpg', 'png']), - help='File glob ext') 
-@click.option('--size', 'opt_size', - type=(int, int), default=(300, 300), - help='Output image size') -@click.option('-t', '--detector-type', 'opt_detector_type', - type=cfg.FaceDetectNetVar, - default=click_utils.get_default(types.FaceDetectNet.DLIB_CNN), - help=click_utils.show_help(types.FaceDetectNet)) -@click.option('-g', '--gpu', 'opt_gpu', default=0, - help='GPU index') -@click.option('--conf', 'opt_conf_thresh', default=0.85, type=click.FloatRange(0,1), - help='Confidence minimum threshold') -@click.option('-p', '--pyramids', 'opt_pyramids', default=0, type=click.IntRange(0,4), - help='Number pyramids to upscale for DLIB detectors') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice list of files') -@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False, - help='Display detections to debug') -@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False, - help='Use glob recursion (slower)') -@click.option('-f', '--force', 'opt_force', is_flag=True, - help='Force overwrite file') -@click.option('--color', 'opt_color_filter', - type=click.Choice(color_filters.keys()), default='color', - help='Filter to keep color or grayscale images (color = keep color') -@click.pass_context -def cli(ctx, opt_dirs_in, opt_fp_out, opt_ext, opt_size, opt_detector_type, - opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_recursive, opt_force, opt_color_filter): - """Converts frames with faces to CSV of ROIs""" - - import sys - import os - from os.path import join - from pathlib import Path - from glob import glob - - from tqdm import tqdm - import numpy as np - import dlib # must keep a local reference for dlib - import cv2 as cv - import pandas as pd - - from app.utils import logger_utils, file_utils, im_utils - from app.processors import face_detector - - # ------------------------------------------------- - # init here - - log = logger_utils.Logger.getLogger() - - if not opt_force and Path(opt_fp_out).exists(): - log.error('File exists. 
Use "-f / --force" to overwite') - return - - if opt_detector_type == types.FaceDetectNet.CVDNN: - detector = face_detector.DetectorCVDNN() - elif opt_detector_type == types.FaceDetectNet.DLIB_CNN: - detector = face_detector.DetectorDLIBCNN(opt_gpu) - elif opt_detector_type == types.FaceDetectNet.DLIB_HOG: - detector = face_detector.DetectorDLIBHOG() - elif opt_detector_type == types.FaceDetectNet.MTCNN: - detector = face_detector.DetectorMTCNN() - elif opt_detector_type == types.FaceDetectNet.HAAR: - log.error('{} not yet implemented'.format(opt_detector_type.name)) - return - - - # ------------------------------------------------- - # process here - color_filter = color_filters[opt_color_filter] - - # get list of files to process - fp_ims = [] - for opt_dir_in in opt_dirs_in: - if opt_recursive: - fp_glob = join(opt_dir_in, '**/*.{}'.format(opt_ext)) - fp_ims += glob(fp_glob, recursive=True) - else: - fp_glob = join(opt_dir_in, '*.{}'.format(opt_ext)) - fp_ims += glob(fp_glob) - log.debug(fp_glob) - - - if opt_slice: - fp_ims = fp_ims[opt_slice[0]:opt_slice[1]] - log.debug('processing {:,} files'.format(len(fp_ims))) - - - data = [] - - for fp_im in tqdm(fp_ims): - im = cv.imread(fp_im) - - # filter out color or grayscale iamges - if color_filter != color_filters['all']: - try: - is_gray = im_utils.is_grayscale(im) - if is_gray and color_filter != color_filters['gray']: - log.debug('Skipping grayscale image: {}'.format(fp_im)) - continue - except Exception as e: - log.error('Could not check grayscale: {}'.format(fp_im)) - continue - - try: - bboxes = detector.detect(im, opt_size=opt_size, opt_pyramids=opt_pyramids) - except Exception as e: - log.error('could not detect: {}'.format(fp_im)) - log.error('{}'.format(e)) - fpp_im = Path(fp_im) - subdir = str(fpp_im.parent.relative_to(opt_dir_in)) - - for bbox in bboxes: - # log.debug('is square: {}'.format(bbox.w == bbox.h)) - nw,nh = int(bbox.w * im.shape[1]), int(bbox.h * im.shape[0]) - roi = { - 'fn': fpp_im.stem, - 'ext': fpp_im.suffix.replace('.',''), - 'x': bbox.x, - 'y': bbox.y, - 'w': bbox.w, - 'h': bbox.h, - 'image_height': im.shape[0], - 'image_width': im.shape[1], - 'subdir': subdir} - bbox_dim = bbox.to_dim(im.shape[:2][::-1]) # w,h - data.append(roi) - - # debug display - if opt_display and len(bboxes): - im_md = im_utils.resize(im, width=min(1200, opt_size[0])) - for bbox in bboxes: - bbox_dim = bbox.to_dim(im_md.shape[:2][::-1]) - cv.rectangle(im_md, bbox_dim.pt_tl, bbox_dim.pt_br, (0,255,0), 3) - cv.imshow('', im_md) - while True: - k = cv.waitKey(1) & 0xFF - if k == 27 or k == ord('q'): # ESC - cv.destroyAllWindows() - sys.exit() - elif k != 255: - # any key to continue - break - - # save date - file_utils.mkdirs(opt_fp_out) - df = pd.DataFrame.from_dict(data) - df.to_csv(opt_fp_out, index=False) \ No newline at end of file diff --git a/megapixels/commands/cv/cluster.py b/megapixels/commands/cv/cluster.py deleted file mode 100644 index 419091a0..00000000 --- a/megapixels/commands/cv/cluster.py +++ /dev/null @@ -1,47 +0,0 @@ -import click - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg -from app.utils.logger_utils import Logger - -@click.command() -@click.option('--data_store', 'opt_data_store', - type=cfg.DataStoreVar, - default=click_utils.get_default(types.DataStore.NAS), - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('--dataset', 'opt_dataset', - type=cfg.DatasetVar, - required=True, - show_default=True, - 
help=click_utils.show_help(types.Dataset)) -@click.option('--metadata', 'opt_metadata', required=True, - type=cfg.MetadataVar, - show_default=True, - help=click_utils.show_help(types.Metadata)) -@click.pass_context -def cli(ctx, opt_data_store, opt_dataset, opt_metadata): - """Display image info""" - - # cluster the embeddings - print("[INFO] clustering...") - clt = DBSCAN(metric="euclidean", n_jobs=args["jobs"]) - clt.fit(encodings) - - # determine the total number of unique faces found in the dataset - labelIDs = np.unique(clt.labels_) - numUniqueFaces = len(np.where(labelIDs > -1)[0]) - print("[INFO] # unique faces: {}".format(numUniqueFaces)) - # load and display image - im = cv.imread(fp_im) - cv.imshow('', im) - - while True: - k = cv.waitKey(1) & 0xFF - if k == 27 or k == ord('q'): # ESC - cv.destroyAllWindows() - sys.exit() - elif k != 255: - # any key to continue - break \ No newline at end of file diff --git a/megapixels/commands/cv/crop.py b/megapixels/commands/cv/crop.py deleted file mode 100644 index 778be0c4..00000000 --- a/megapixels/commands/cv/crop.py +++ /dev/null @@ -1,104 +0,0 @@ -""" -Crop images to prepare for training -""" - -import click -from PIL import Image, ImageOps, ImageFilter, ImageDraw - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - -@click.command() -@click.option('-i', '--input', 'opt_dir_in', required=True, - help='Input directory') -@click.option('-o', '--output', 'opt_dir_out', required=True, - help='Output directory') -@click.option('-e', '--ext', 'opt_ext', - default='jpg', type=click.Choice(['jpg', 'png']), - help='File glob ext') -@click.option('--size', 'opt_size', - type=(int, int), default=(256, 256), - help='Output image size') -@click.option('-t', '--crop-type', 'opt_crop_type', - default='center', type=click.Choice(['center', 'mirror', 'face', 'person', 'none']), - help='Force fit image center location') -@click.pass_context -def cli(ctx, opt_dir_in, opt_dir_out, opt_ext, opt_size, opt_crop_type): - """Crop, mirror images""" - - import os - from os.path import join - from pathlib import Path - from glob import glob - from tqdm import tqdm - - - from app.utils import logger_utils, file_utils, im_utils - - # ------------------------------------------------- - # process here - - log = logger_utils.Logger.getLogger() - log.info('crop images') - - # get list of files to process - fp_ims = glob(join(opt_dir_in, '*.{}'.format(opt_ext))) - log.debug('files: {}'.format(len(fp_ims))) - - # ensure output dir exists - file_utils.mkdirs(opt_dir_out) - - for fp_im in tqdm(fp_ims): - im = process_crop(fp_im, opt_size, opt_crop_type) - fp_out = join(opt_dir_out, Path(fp_im).name) - im.save(fp_out) - - -def process_crop(fp_im, opt_size, crop_type): - im = Image.open(fp_im) - if crop_type == 'center': - im = crop_square_fit(im, opt_size) - elif crop_type == 'mirror': - im = mirror_crop_square(im, opt_size) - return im - -def crop_square_fit(im, size, center=(0.5, 0.5)): - return ImageOps.fit(im, size, method=Image.BICUBIC, centering=center) - -def mirror_crop_square(im, size): - # force to even dims - if im.size[0] % 2 or im.size[1] % 2: - im = ImageOps.fit(im, ((im.size[0] // 2) * 2, (im.size[1] // 2) * 2)) - - # create new square image - min_size, max_size = (min(im.size), max(im.size)) - orig_w, orig_h = im.size - margin = (max_size - min_size) // 2 - w, h = (max_size, max_size) - im_new = Image.new('RGB', (w, h), color=(0, 0, 0)) - - #crop (l, t, r, b) - if orig_w > orig_h: - # landscape, 
mirror expand T/B - im_top = ImageOps.mirror(im.crop((0, 0, margin, w))) - im_bot = ImageOps.mirror(im.crop((orig_h - margin, 0, orig_h, w))) - im_new.paste(im_top, (0, 0)) - im_new.paste(im, (margin, 0, orig_h + margin, w)) - im_new.paste(im_bot, (h - margin, 0)) - elif orig_h > orig_w: - # portrait, mirror expand L/R - im_left = ImageOps.mirror(im.crop((0, 0, margin, h))) - im_right = ImageOps.mirror(im.crop((orig_w - margin, 0, orig_w, h))) - im_new.paste(im_left, (0, 0)) - im_new.paste(im, (margin, 0, orig_w + margin, h)) - im_new.paste(im_right, (w - margin, 0)) - - return im_new.resize(size) - - -def center_crop_face(): - pass - -def center_crop_person(): - pass \ No newline at end of file diff --git a/megapixels/commands/cv/csv_to_faces.py b/megapixels/commands/cv/csv_to_faces.py deleted file mode 100644 index 64c8b965..00000000 --- a/megapixels/commands/cv/csv_to_faces.py +++ /dev/null @@ -1,105 +0,0 @@ -""" -Reads in CSV of ROIs and extracts facial regions with padding -""" - -import click - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - -@click.command() -@click.option('-i', '--input', 'opt_fp_in', required=True, - help='Input CSV') -@click.option('-m', '--media', 'opt_dir_media', required=True, - help='Input image/video directory') -@click.option('-o', '--output', 'opt_dir_out', required=True, - help='Output directory for extracted ROI images') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice list of files') -@click.option('--padding', 'opt_padding', default=0.25, - help='Facial padding as percentage of face width') -@click.option('--ext', 'opt_ext_out', default='png', type=click.Choice(['jpg', 'png']), - help='Output image type') -@click.option('--min', 'opt_min', default=(60, 60), - help='Minimum original face size') -@click.pass_context -def cli(ctx, opt_fp_in, opt_dir_media, opt_dir_out, opt_slice, - opt_padding, opt_ext_out, opt_min): - """Converts ROIs to images""" - - import os - from os.path import join - from pathlib import Path - from glob import glob - - from tqdm import tqdm - import numpy as np - from PIL import Image, ImageOps, ImageFilter, ImageDraw - import cv2 as cv - import pandas as pd - - from app.utils import logger_utils, file_utils, im_utils - from app.models.bbox import BBox - - # ------------------------------------------------- - # process here - log = logger_utils.Logger.getLogger() - - df_rois = pd.read_csv(opt_fp_in, dtype={'subdir': str, 'fn': str}) - if opt_slice: - df_rois = df_rois[opt_slice[0]:opt_slice[1]] - - log.info('Processing {:,} rows'.format(len(df_rois))) - - file_utils.mkdirs(opt_dir_out) - - df_rois_grouped = df_rois.groupby(['fn']) # group by fn/filename - groups = df_rois_grouped.groups - skipped = [] - - for group in tqdm(groups): - # get image - group_rows = df_rois_grouped.get_group(group) - - row = group_rows.iloc[0] - fp_im = join(opt_dir_media, str(row['subdir']), '{fn}.{ext}'.format(**row)) # TODO change to ext - try: - im = Image.open(fp_im).convert('RGB') - im.verify() - except Exception as e: - log.warn('Could not open: {}'.format(fp_im)) - log.error(e) - continue - - for idx, roi in group_rows.iterrows(): - # get bbox to im dimensions - xywh = [roi['x'], roi['y'], roi['w'] , roi['h']] - bbox = BBox.from_xywh(*xywh) - dim = im.size - bbox_dim = bbox.to_dim(dim) - # expand - opt_padding_px = int(opt_padding * bbox_dim.width) - bbox_dim_exp = bbox_dim.expand_dim(opt_padding_px, dim) - # crop - x1y2 = bbox_dim_exp.pt_tl + 
bbox_dim_exp.pt_br - im_crop = im.crop(box=x1y2) - - # strip exif, create new image and paste data - im_crop_data = list(im_crop.getdata()) - im_crop_no_exif = Image.new(im_crop.mode, im_crop.size) - im_crop_no_exif.putdata(im_crop_data) - - # save - idx_zpad = file_utils.zpad(idx, zeros=3) - subdir = '' if roi['subdir'] == '.' else '{}_'.format(roi['subdir']) - subdir = subdir.replace('/', '_') - fp_im_out = join(opt_dir_out, '{}{}{}.{}'.format(subdir, roi['fn'], idx_zpad, opt_ext_out)) - # threshold size and save - if im_crop_no_exif.size[0] < opt_min[0] or im_crop_no_exif.size[1] < opt_min[1]: - skipped.append(fp_im_out) - log.info('Face too small: {}, idx: {}'.format(fp_im, idx)) - else: - im_crop_no_exif.save(fp_im_out) - - log.info('Skipped {:,} images'.format(len(skipped))) diff --git a/megapixels/commands/cv/csv_to_faces_mt.py b/megapixels/commands/cv/csv_to_faces_mt.py deleted file mode 100644 index 64c8b965..00000000 --- a/megapixels/commands/cv/csv_to_faces_mt.py +++ /dev/null @@ -1,105 +0,0 @@ -""" -Reads in CSV of ROIs and extracts facial regions with padding -""" - -import click - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - -@click.command() -@click.option('-i', '--input', 'opt_fp_in', required=True, - help='Input CSV') -@click.option('-m', '--media', 'opt_dir_media', required=True, - help='Input image/video directory') -@click.option('-o', '--output', 'opt_dir_out', required=True, - help='Output directory for extracted ROI images') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice list of files') -@click.option('--padding', 'opt_padding', default=0.25, - help='Facial padding as percentage of face width') -@click.option('--ext', 'opt_ext_out', default='png', type=click.Choice(['jpg', 'png']), - help='Output image type') -@click.option('--min', 'opt_min', default=(60, 60), - help='Minimum original face size') -@click.pass_context -def cli(ctx, opt_fp_in, opt_dir_media, opt_dir_out, opt_slice, - opt_padding, opt_ext_out, opt_min): - """Converts ROIs to images""" - - import os - from os.path import join - from pathlib import Path - from glob import glob - - from tqdm import tqdm - import numpy as np - from PIL import Image, ImageOps, ImageFilter, ImageDraw - import cv2 as cv - import pandas as pd - - from app.utils import logger_utils, file_utils, im_utils - from app.models.bbox import BBox - - # ------------------------------------------------- - # process here - log = logger_utils.Logger.getLogger() - - df_rois = pd.read_csv(opt_fp_in, dtype={'subdir': str, 'fn': str}) - if opt_slice: - df_rois = df_rois[opt_slice[0]:opt_slice[1]] - - log.info('Processing {:,} rows'.format(len(df_rois))) - - file_utils.mkdirs(opt_dir_out) - - df_rois_grouped = df_rois.groupby(['fn']) # group by fn/filename - groups = df_rois_grouped.groups - skipped = [] - - for group in tqdm(groups): - # get image - group_rows = df_rois_grouped.get_group(group) - - row = group_rows.iloc[0] - fp_im = join(opt_dir_media, str(row['subdir']), '{fn}.{ext}'.format(**row)) # TODO change to ext - try: - im = Image.open(fp_im).convert('RGB') - im.verify() - except Exception as e: - log.warn('Could not open: {}'.format(fp_im)) - log.error(e) - continue - - for idx, roi in group_rows.iterrows(): - # get bbox to im dimensions - xywh = [roi['x'], roi['y'], roi['w'] , roi['h']] - bbox = BBox.from_xywh(*xywh) - dim = im.size - bbox_dim = bbox.to_dim(dim) - # expand - opt_padding_px = int(opt_padding * bbox_dim.width) 
- bbox_dim_exp = bbox_dim.expand_dim(opt_padding_px, dim) - # crop - x1y2 = bbox_dim_exp.pt_tl + bbox_dim_exp.pt_br - im_crop = im.crop(box=x1y2) - - # strip exif, create new image and paste data - im_crop_data = list(im_crop.getdata()) - im_crop_no_exif = Image.new(im_crop.mode, im_crop.size) - im_crop_no_exif.putdata(im_crop_data) - - # save - idx_zpad = file_utils.zpad(idx, zeros=3) - subdir = '' if roi['subdir'] == '.' else '{}_'.format(roi['subdir']) - subdir = subdir.replace('/', '_') - fp_im_out = join(opt_dir_out, '{}{}{}.{}'.format(subdir, roi['fn'], idx_zpad, opt_ext_out)) - # threshold size and save - if im_crop_no_exif.size[0] < opt_min[0] or im_crop_no_exif.size[1] < opt_min[1]: - skipped.append(fp_im_out) - log.info('Face too small: {}, idx: {}'.format(fp_im, idx)) - else: - im_crop_no_exif.save(fp_im_out) - - log.info('Skipped {:,} images'.format(len(skipped))) diff --git a/megapixels/commands/cv/face_3ddfa.py b/megapixels/commands/cv/face_3ddfa.py deleted file mode 100644 index ffc74180..00000000 --- a/megapixels/commands/cv/face_3ddfa.py +++ /dev/null @@ -1,331 +0,0 @@ -import click - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - - -@click.command() -@click.option('-i', '--input', 'opt_fp_in', default=None, required=True, - help='Image filepath') -@click.option('-o', '--output', 'opt_fp_out', default=None, - help='GIF output path') -@click.option('--size', 'opt_size', - type=(int, int), default=(300, 300), - help='Output image size') -@click.option('-g', '--gpu', 'opt_gpu', default=0, - help='GPU index') -@click.option('-f', '--force', 'opt_force', is_flag=True, - help='Force overwrite file') -@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False, - help='Display detections to debug') -@click.pass_context -def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display): - """Face detector demo""" - - import sys - import os - from os.path import join - from pathlib import Path - import time - - from tqdm import tqdm - import numpy as np - import pandas as pd - import cv2 as cv - import dlib - - from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils - from app.utils import plot_utils - from app.processors import face_detector, face_age - from app.models.data_store import DataStore - - # 3DDFA - # git clone https://github.com/cleardusk/3DDFA/ 3rdparty/ - - import torch - import torchvision.transforms as transforms - import mobilenet_v1 - from utils.ddfa import ToTensorGjz, NormalizeGjz, str2bool - import scipy.io as sio - from utils.inference import get_suffix, parse_roi_box_from_landmark, crop_img, predict_68pts, dump_to_ply, dump_vertex, \ - draw_landmarks, predict_dense, parse_roi_box_from_bbox, get_colors, write_obj_with_colors - from utils.cv_plot import plot_pose_box - from utils.estimate_pose import parse_pose - from utils.render import get_depths_image, cget_depths_image, cpncc - from utils.paf import gen_img_paf - import argparse - import torch.backends.cudnn as cudnn - - - log = logger_utils.Logger.getLogger() - - - # ------------------------------------------------- - # load image - - im = cv.imread(opt_fp_in) - im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) - - # ---------------------------------------------------------------------------- - # detect face - - face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU - bboxes = face_detector.detect(im_resized, largest=True) - bbox = bboxes[0] 
- dim = im_resized.shape[:2][::-1] - bbox_dim = bbox.to_dim(dim) - if not bbox: - log.error('no face detected') - return - else: - log.info(f'face detected: {bbox_dim.to_xyxy()}') - - - # ---------------------------------------------------------------------------- - # age - - age_apparent_predictor = face_age.FaceAgeApparent() - age_real_predictor = face_age.FaceAgeReal() - - st = time.time() - age_real = age_real_predictor.age(im_resized, bbox_dim) - log.info(f'age real took: {(time.time()-st)/1000:.5f}s') - st = time.time() - age_apparent = age_apparent_predictor.age(im_resized, bbox_dim) - log.info(f'age apparent took: {(time.time()-st)/1000:.5f}s') - - - # ---------------------------------------------------------------------------- - # output - - log.info(f'Face coords: {bbox_dim} face') - log.info(f'Age (real): {(age_real):.2f}') - log.info(f'Age (apparent): {(age_apparent):.2f}') - - - # ---------------------------------------------------------------------------- - # draw - - # draw real age - im_age_real = im_resized.copy() - draw_utils.draw_bbox(im_age_real, bbox_dim) - txt = f'{(age_real):.2f}' - draw_utils.draw_text(im_age_real, bbox_dim.pt_tl, txt) - - # apparent - im_age_apparent = im_resized.copy() - draw_utils.draw_bbox(im_age_apparent, bbox_dim) - txt = f'{(age_apparent):.2f}' - draw_utils.draw_text(im_age_apparent, bbox_dim.pt_tl, txt) - - - # ---------------------------------------------------------------------------- - # save - - if opt_fp_out: - # save pose only - fpp_out = Path(opt_fp_out) - - fp_out = join(fpp_out.parent, f'{fpp_out.stem}_real{fpp_out.suffix}') - cv.imwrite(fp_out, im_age_real) - - fp_out = join(fpp_out.parent, f'{fpp_out.stem}_apparent{fpp_out.suffix}') - cv.imwrite(fp_out, im_age_apparent) - - - # ---------------------------------------------------------------------------- - # display - - if opt_display: - # show all images here - cv.imshow('real', im_age_real) - cv.imshow('apparent', im_age_apparent) - display_utils.handle_keyboard() - - - - - -STD_SIZE = 120 - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='3DDFA inference pipeline') - parser.add_argument('-f', '--files', nargs='+', - help='image files paths fed into network, single or multiple images') - parser.add_argument('-m', '--mode', default='cpu', type=str, help='gpu or cpu mode') - parser.add_argument('--show_flg', default='true', type=str2bool, help='whether show the visualization result') - parser.add_argument('--bbox_init', default='one', type=str, - help='one|two: one-step bbox initialization or two-step') - parser.add_argument('--dump_res', default='true', type=str2bool, help='whether write out the visualization image') - parser.add_argument('--dump_vertex', default='true', type=str2bool, - help='whether write out the dense face vertices to mat') - parser.add_argument('--dump_ply', default='true', type=str2bool) - parser.add_argument('--dump_pts', default='true', type=str2bool) - parser.add_argument('--dump_roi_box', default='true', type=str2bool) - parser.add_argument('--dump_pose', default='true', type=str2bool) - parser.add_argument('--dump_depth', default='true', type=str2bool) - parser.add_argument('--dump_pncc', default='true', type=str2bool) - parser.add_argument('--dump_paf', default='true', type=str2bool) - parser.add_argument('--paf_size', default=3, type=int, help='PAF feature kernel size') - parser.add_argument('--dump_obj', default='true', type=str2bool) - parser.add_argument('--dlib_bbox', default='true', type=str2bool, help='whether use 
dlib to predict bbox') - parser.add_argument('--dlib_landmark', default='true', type=str2bool, - help='whether use dlib landmark to crop image') - - args = parser.parse_args() - main(args) - - - -def main(args): - # 1. load pre-tained model - checkpoint_fp = 'models/phase1_wpdc_vdc_v2.pth.tar' - arch = 'mobilenet_1' - - checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict'] - model = getattr(mobilenet_v1, arch)(num_classes=62) # 62 = 12(pose) + 40(shape) +10(expression) - model_dict = model.state_dict() - # because the model is trained by multiple gpus, prefix module should be removed - for k in checkpoint.keys(): - model_dict[k.replace('module.', '')] = checkpoint[k] - model.load_state_dict(model_dict, strict=False) - if args.mode == 'gpu': - cudnn.benchmark = True - model = model.cuda() - model.eval() - - # 2. load dlib model for face detection and landmark used for face cropping - if args.dlib_landmark: - dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat' - face_regressor = dlib.shape_predictor(dlib_landmark_model) - if args.dlib_bbox: - face_detector = dlib.get_frontal_face_detector() - - # 3. forward - tri = sio.loadmat('visualize/tri.mat')['tri'] - transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)]) - for img_fp in args.files: - img_ori = cv2.imread(img_fp) - if args.dlib_bbox: - rects = face_detector(img_ori, 1) - else: - rects = [] - - if len(rects) == 0: - rects = dlib.rectangles() - rect_fp = img_fp + '.bbox' - lines = open(rect_fp).read().strip().split('\n')[1:] - for l in lines: - l, r, t, b = [int(_) for _ in l.split(' ')[1:]] - rect = dlib.rectangle(l, r, t, b) - rects.append(rect) - - pts_res = [] - Ps = [] # Camera matrix collection - poses = [] # pose collection, [todo: validate it] - vertices_lst = [] # store multiple face vertices - ind = 0 - suffix = get_suffix(img_fp) - for rect in rects: - # whether use dlib landmark to crop image, if not, use only face bbox to calc roi bbox for cropping - if args.dlib_landmark: - # - use landmark for cropping - pts = face_regressor(img_ori, rect).parts() - pts = np.array([[pt.x, pt.y] for pt in pts]).T - roi_box = parse_roi_box_from_landmark(pts) - else: - # - use detected face bbox - bbox = [rect.left(), rect.top(), rect.right(), rect.bottom()] - roi_box = parse_roi_box_from_bbox(bbox) - - img = crop_img(img_ori, roi_box) - - # forward: one step - img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR) - input = transform(img).unsqueeze(0) - with torch.no_grad(): - if args.mode == 'gpu': - input = input.cuda() - param = model(input) - param = param.squeeze().cpu().numpy().flatten().astype(np.float32) - - # 68 pts - pts68 = predict_68pts(param, roi_box) - - # two-step for more accurate bbox to crop face - if args.bbox_init == 'two': - roi_box = parse_roi_box_from_landmark(pts68) - img_step2 = crop_img(img_ori, roi_box) - img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR) - input = transform(img_step2).unsqueeze(0) - with torch.no_grad(): - if args.mode == 'gpu': - input = input.cuda() - param = model(input) - param = param.squeeze().cpu().numpy().flatten().astype(np.float32) - - pts68 = predict_68pts(param, roi_box) - - pts_res.append(pts68) - P, pose = parse_pose(param) - Ps.append(P) - poses.append(pose) - - # dense face 3d vertices - if args.dump_ply or args.dump_vertex or args.dump_depth or args.dump_pncc or args.dump_obj: - vertices = predict_dense(param, roi_box) - 
vertices_lst.append(vertices) - if args.dump_ply: - dump_to_ply(vertices, tri, '{}_{}.ply'.format(img_fp.replace(suffix, ''), ind)) - if args.dump_vertex: - dump_vertex(vertices, '{}_{}.mat'.format(img_fp.replace(suffix, ''), ind)) - - # save .mat for 3d Face - wfp = '{}_{}_face3d.mat'.format(img_fp.replace(suffix, ''), ind) - colors = get_colors(img_ori, vertices) - sio.savemat(wfp, {'vertices': vertices, 'colors': colors, 'triangles': tri}) - - if args.dump_pts: - wfp = '{}_{}.txt'.format(img_fp.replace(suffix, ''), ind) - np.savetxt(wfp, pts68, fmt='%.3f') - print('Save 68 3d landmarks to {}'.format(wfp)) - if args.dump_roi_box: - wfp = '{}_{}.roibox'.format(img_fp.replace(suffix, ''), ind) - np.savetxt(wfp, roi_box, fmt='%.3f') - print('Save roi box to {}'.format(wfp)) - if args.dump_paf: - wfp_paf = '{}_{}_paf.jpg'.format(img_fp.replace(suffix, ''), ind) - wfp_crop = '{}_{}_crop.jpg'.format(img_fp.replace(suffix, ''), ind) - paf_feature = gen_img_paf(img_crop=img, param=param, kernel_size=args.paf_size) - - cv2.imwrite(wfp_paf, paf_feature) - cv2.imwrite(wfp_crop, img) - print('Dump to {} and {}'.format(wfp_crop, wfp_paf)) - if args.dump_obj: - wfp = '{}_{}.obj'.format(img_fp.replace(suffix, ''), ind) - colors = get_colors(img_ori, vertices) - write_obj_with_colors(wfp, vertices, tri, colors) - print('Dump obj with sampled texture to {}'.format(wfp)) - ind += 1 - - if args.dump_pose: - # P, pose = parse_pose(param) # Camera matrix (without scale), and pose (yaw, pitch, roll, to verify) - img_pose = plot_pose_box(img_ori, Ps, pts_res) - wfp = img_fp.replace(suffix, '_pose.jpg') - cv2.imwrite(wfp, img_pose) - print('Dump to {}'.format(wfp)) - if args.dump_depth: - wfp = img_fp.replace(suffix, '_depth.png') - # depths_img = get_depths_image(img_ori, vertices_lst, tri-1) # python version - depths_img = cget_depths_image(img_ori, vertices_lst, tri - 1) # cython version - cv2.imwrite(wfp, depths_img) - print('Dump to {}'.format(wfp)) - if args.dump_pncc: - wfp = img_fp.replace(suffix, '_pncc.png') - pncc_feature = cpncc(img_ori, vertices_lst, tri - 1) # cython version - cv2.imwrite(wfp, pncc_feature[:, :, ::-1]) # cv2.imwrite will swap RGB -> BGR - print('Dump to {}'.format(wfp)) - if args.dump_res: - draw_landmarks(img_ori, pts_res, wfp=img_fp.replace(suffix, '_3DDFA.jpg'), show_flg=args.show_flg) diff --git a/megapixels/commands/cv/face_attributes.py b/megapixels/commands/cv/face_attributes.py deleted file mode 100644 index 01fe3bd1..00000000 --- a/megapixels/commands/cv/face_attributes.py +++ /dev/null @@ -1,136 +0,0 @@ -""" - -""" - -import click - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - - -@click.command() -@click.option('-i', '--input', 'opt_fp_in', default=None, - help='Override enum input filename CSV') -@click.option('-o', '--output', 'opt_fp_out', default=None, - help='Override enum output filename CSV') -@click.option('-m', '--media', 'opt_dir_media', default=None, - help='Override enum media directory') -@click.option('--store', 'opt_data_store', - type=cfg.DataStoreVar, - default=click_utils.get_default(types.DataStore.HDD), - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('--dataset', 'opt_dataset', - type=cfg.DatasetVar, - required=True, - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('--size', 'opt_size', - type=(int, int), default=cfg.DEFAULT_SIZE_FACE_DETECT, - help='Processing size for detection') -@click.option('--slice', 'opt_slice', 
type=(int, int), default=(None, None), - help='Slice list of files') -@click.option('-f', '--force', 'opt_force', is_flag=True, - help='Force overwrite file') -@click.option('-d', '--display', 'opt_display', is_flag=True, - help='Display image for debugging') -@click.pass_context -def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, - opt_size, opt_slice, opt_force, opt_display): - """Creates 2D 68-point landmarks""" - - import sys - import os - from os.path import join - from pathlib import Path - from glob import glob - - from tqdm import tqdm - import numpy as np - import cv2 as cv - import pandas as pd - - from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils - from app.processors import face_age_gender - from app.models.data_store import DataStore - from app.models.bbox import BBox - - # ------------------------------------------------------------------------- - # init here - - log = logger_utils.Logger.getLogger() - # init face processors - age_estimator_apnt = face_age_gender.FaceAgeApparent() - age_estimator_real = face_age_gender.FaceAgeReal() - gender_estimator = face_age_gender.FaceGender() - - # init filepaths - data_store = DataStore(opt_data_store, opt_dataset) - # set file output path - metadata_type = types.Metadata.FACE_ATTRIBUTES - fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out - if not opt_force and Path(fp_out).exists(): - log.error('File exists. Use "-f / --force" to overwite') - return - - # ------------------------------------------------------------------------- - # load filepath data - fp_record = data_store.metadata(types.Metadata.FILE_RECORD) - df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index') - # load ROI data - fp_roi = data_store.metadata(types.Metadata.FACE_ROI) - df_roi = pd.read_csv(fp_roi).set_index('index') - # slice if you want - if opt_slice: - df_roi = df_roi[opt_slice[0]:opt_slice[1]] - # group by image index (speedup if multiple faces per image) - df_img_groups = df_roi.groupby('record_index') - log.debug('processing {:,} groups'.format(len(df_img_groups))) - - # store landmarks in list - results = [] - - # ------------------------------------------------------------------------- - # iterate groups with file/record index as key - - for record_index, df_img_group in tqdm(df_img_groups): - - # access file_record DataSeries - file_record = df_record.iloc[record_index] - - # load image - fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext) - im = cv.imread(fp_im) - im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) - dim = im_resized.shape[:2][::-1] - - # iterate ROIs in this image - for roi_index, df_img in df_img_group.iterrows(): - - # find landmarks - bbox_norm = BBox.from_xywh(df_img.x, df_img.y, df_img.w, df_img.h) - bbox_dim = bbox_norm.to_dim(dim) - - age_apnt = age_estimator_apnt.predict(im_resized, bbox_norm) - age_real = age_estimator_real.predict(im_resized, bbox_norm) - gender = gender_estimator.predict(im_resized, bbox_norm) - - attr_obj = { - 'age_real':float(f'{age_real:.2f}'), - 'age_apparent': float(f'{age_apnt:.2f}'), - 'm': float(f'{gender["m"]:.4f}'), - 'f': float(f'{gender["f"]:.4f}'), - 'roi_index': roi_index - } - results.append(attr_obj) - - - # create DataFrame and save to CSV - file_utils.mkdirs(fp_out) - df = pd.DataFrame.from_dict(results) - df.index.name = 'index' - df.to_csv(fp_out) - - # save script - file_utils.write_text(' '.join(sys.argv), 
'{}.sh'.format(fp_out)) \ No newline at end of file diff --git a/megapixels/commands/cv/face_frames.py b/megapixels/commands/cv/face_frames.py deleted file mode 100644 index 76f23af1..00000000 --- a/megapixels/commands/cv/face_frames.py +++ /dev/null @@ -1,82 +0,0 @@ -from glob import glob -import os -from os.path import join -from pathlib import Path - -import click - - - - -@click.command() -@click.option('-i', '--input', 'opt_fp_in', required=True, - help='Input directory to glob') -@click.option('-o', '--output', 'opt_fp_out', required=True, - help='Output directory for face frames') -@click.option('--size', 'opt_size', - type=(int, int), default=(300, 300), - help='Output image size') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice list of files') -@click.pass_context -def cli(ctx, opt_fp_in, opt_fp_out, opt_size, opt_slice): - """Split video to face frames""" - - from tqdm import tqdm - import dlib - import pandas as pd - from PIL import Image, ImageOps, ImageFilter - import cv2 as cv - import numpy as np - - from app.processors import face_detector - from app.utils import logger_utils, file_utils, im_utils - from app.settings import types - from app.utils import click_utils - from app.settings import app_cfg as cfg - from app.models.bbox import BBox - - log = logger_utils.Logger.getLogger() - - # ------------------------------------------------- - # process - - detector = face_detector.DetectorDLIBCNN() - - # get file list - fp_videos = glob(join(opt_fp_in, '*.mp4')) - fp_videos += glob(join(opt_fp_in, '*.webm')) - fp_videos += glob(join(opt_fp_in, '*.mkv')) - - min_distance_per = .025 # minimum distance percentage to save new face image - face_interval = 5 - frame_interval_count = 0 - frame_count = 0 - bbox_prev = BBox(0,0,0,0) - file_utils.mkdirs(opt_fp_out) - dnn_size = opt_size - max_dim = max(dnn_size) - px_thresh = int(max_dim * min_distance_per) - - for fp_video in tqdm(fp_videos): - # load video - video = cv.VideoCapture(fp_video) - # iterate through frames - while video.isOpened(): - res, frame = video.read() - if not res: - break - # increment frames, save frame if interval has passed - frame_count += 1 # for naming - frame_interval_count += 1 # for interval - bboxes = detector.detect(frame, opt_size=dnn_size, opt_pyramids=0) - if len(bboxes) > 0 and frame_interval_count >= face_interval: - dim = frame.shape[:2][::-1] - d = bboxes[0].to_dim(dim).distance(bbox_prev) - if d > px_thresh: - # save frame - zfc = file_utils.zpad(frame_count) - fp_frame = join(opt_fp_out, '{}_{}.jpg'.format(Path(fp_video).stem, zfc)) - cv.imwrite(fp_frame, frame) - frame_interval_count = 0 - bbox_prev = bboxes[0] diff --git a/megapixels/commands/cv/face_landmark_2d_5.py b/megapixels/commands/cv/face_landmark_2d_5.py deleted file mode 100644 index 40ec6f41..00000000 --- a/megapixels/commands/cv/face_landmark_2d_5.py +++ /dev/null @@ -1,146 +0,0 @@ -""" - -""" - -import click - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - -color_filters = {'color': 1, 'gray': 2, 'all': 3} - -@click.command() -@click.option('-i', '--input', 'opt_fp_in', default=None, - help='Override enum input filename CSV') -@click.option('-o', '--output', 'opt_fp_out', default=None, - help='Override enum output filename CSV') -@click.option('-m', '--media', 'opt_dir_media', default=None, - help='Override enum media directory') -@click.option('--store', 'opt_data_store', - type=cfg.DataStoreVar, - 
default=click_utils.get_default(types.DataStore.HDD), - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('--dataset', 'opt_dataset', - type=cfg.DatasetVar, - required=True, - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('-d', '--detector', 'opt_detector_type', - type=cfg.FaceLandmark2D_5Var, - default=click_utils.get_default(types.FaceLandmark2D_5.DLIB), - help=click_utils.show_help(types.FaceLandmark2D_5)) -@click.option('--size', 'opt_size', - type=(int, int), default=(300, 300), - help='Output image size') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice list of files') -@click.option('-f', '--force', 'opt_force', is_flag=True, - help='Force overwrite file') -@click.option('-d', '--display', 'opt_display', is_flag=True, - help='Display image for debugging') -@click.pass_context -def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type, - opt_size, opt_slice, opt_force, opt_display): - """Creates 2D 5-point landmarks""" - - import sys - import os - from os.path import join - from pathlib import Path - from glob import glob - - from tqdm import tqdm - import numpy as np - import cv2 as cv - import pandas as pd - - from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils - from app.processors import face_landmarks - from app.models.data_store import DataStore - from app.models.bbox import BBox - - # ------------------------------------------------- - # init here - - log = logger_utils.Logger.getLogger() - # init filepaths - data_store = DataStore(opt_data_store, opt_dataset) - # set file output path - metadata_type = types.Metadata.FACE_LANDMARK_2D_5 - fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out - if not opt_force and Path(fp_out).exists(): - log.error('File exists. 
Use "-f / --force" to overwite') - return - - # init face landmark processors - if opt_detector_type == types.FaceLandmark2D_5.DLIB: - # use dlib 68 point detector - landmark_detector = face_landmarks.Dlib2D_5() - elif opt_detector_type == types.FaceLandmark2D_5.MTCNN: - # use dlib 5 point detector - landmark_detector = face_landmarks.MTCNN2D_5() - else: - log.error('{} not yet implemented'.format(opt_detector_type.name)) - return - - log.info(f'Using landmark detector: {opt_detector_type.name}') - - # load filepath data - fp_record = data_store.metadata(types.Metadata.FILE_RECORD) - df_record = pd.read_csv(fp_record).set_index('index') - # load ROI data - fp_roi = data_store.metadata(types.Metadata.FACE_ROI) - df_roi = pd.read_csv(fp_roi).set_index('index') - # slice if you want - if opt_slice: - df_roi = df_roi[opt_slice[0]:opt_slice[1]] - # group by image index (speedup if multiple faces per image) - df_img_groups = df_roi.groupby('record_index') - log.debug('processing {:,} groups'.format(len(df_img_groups))) - - # store landmarks in list - results = [] - - # iterate groups with file/record index as key - for record_index, df_img_group in tqdm(df_img_groups): - - # acces file record - ds_record = df_record.iloc[record_index] - - # load image - fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext) - im = cv.imread(fp_im) - im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) - - # iterate image group dataframe with roi index as key - for roi_index, df_img in df_img_group.iterrows(): - - # get bbox - x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h - dim = im_resized.shape[:2][::-1] - bbox = BBox.from_xywh(x, y, w, h).to_dim(dim) - - # get landmark points - points = landmark_detector.landmarks(im_resized, bbox) - points_norm = landmark_detector.normalize(points, dim) - points_flat = landmark_detector.flatten(points_norm) - - # display to screen if optioned - if opt_display: - draw_utils.draw_landmarks2D(im_resized, points) - draw_utils.draw_bbox(im_resized, bbox) - cv.imshow('', im_resized) - display_utils.handle_keyboard() - - results.append(points_flat) - - # create DataFrame and save to CSV - file_utils.mkdirs(fp_out) - df = pd.DataFrame.from_dict(results) - df.index.name = 'index' - df.to_csv(fp_out) - - # save script - file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file diff --git a/megapixels/commands/cv/face_landmark_2d_68.py b/megapixels/commands/cv/face_landmark_2d_68.py deleted file mode 100644 index c6978a40..00000000 --- a/megapixels/commands/cv/face_landmark_2d_68.py +++ /dev/null @@ -1,150 +0,0 @@ -""" - -""" - -import click - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - - -@click.command() -@click.option('-i', '--input', 'opt_fp_in', default=None, - help='Override enum input filename CSV') -@click.option('-o', '--output', 'opt_fp_out', default=None, - help='Override enum output filename CSV') -@click.option('-m', '--media', 'opt_dir_media', default=None, - help='Override enum media directory') -@click.option('--store', 'opt_data_store', - type=cfg.DataStoreVar, - default=click_utils.get_default(types.DataStore.HDD), - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('--dataset', 'opt_dataset', - type=cfg.DatasetVar, - required=True, - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('-d', '--detector', 'opt_detector_type', - type=cfg.FaceLandmark2D_68Var, - 
default=click_utils.get_default(types.FaceLandmark2D_68.DLIB), - help=click_utils.show_help(types.FaceLandmark2D_68)) -@click.option('--size', 'opt_size', - type=(int, int), default=(300, 300), - help='Output image size') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice list of files') -@click.option('-f', '--force', 'opt_force', is_flag=True, - help='Force overwrite file') -@click.option('-d', '--display', 'opt_display', is_flag=True, - help='Display image for debugging') -@click.pass_context -def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type, - opt_size, opt_slice, opt_force, opt_display): - """Creates 2D 68-point landmarks""" - - import sys - import os - from os.path import join - from pathlib import Path - from glob import glob - - from tqdm import tqdm - import numpy as np - import cv2 as cv - import pandas as pd - - from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils - from app.processors import face_landmarks - from app.models.data_store import DataStore - from app.models.bbox import BBox - - # ------------------------------------------------------------------------- - # init here - - log = logger_utils.Logger.getLogger() - # init filepaths - data_store = DataStore(opt_data_store, opt_dataset) - # set file output path - metadata_type = types.Metadata.FACE_LANDMARK_2D_68 - fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out - if not opt_force and Path(fp_out).exists(): - log.error('File exists. Use "-f / --force" to overwite') - return - - # init face landmark processors - if opt_detector_type == types.FaceLandmark2D_68.DLIB: - # use dlib 68 point detector - landmark_detector = face_landmarks.Dlib2D_68() - elif opt_detector_type == types.FaceLandmark2D_68.FACE_ALIGNMENT: - # use dlib 5 point detector - landmark_detector = face_landmarks.FaceAlignment2D_68() - else: - log.error('{} not yet implemented'.format(opt_detector_type.name)) - return - - log.info(f'Using landmark detector: {opt_detector_type.name}') - - # ------------------------------------------------------------------------- - # load filepath data - fp_record = data_store.metadata(types.Metadata.FILE_RECORD) - df_record = pd.read_csv(fp_record).set_index('index') - # load ROI data - fp_roi = data_store.metadata(types.Metadata.FACE_ROI) - df_roi = pd.read_csv(fp_roi).set_index('index') - # slice if you want - if opt_slice: - df_roi = df_roi[opt_slice[0]:opt_slice[1]] - # group by image index (speedup if multiple faces per image) - df_img_groups = df_roi.groupby('record_index') - log.debug('processing {:,} groups'.format(len(df_img_groups))) - - # store landmarks in list - results = [] - - # ------------------------------------------------------------------------- - # iterate groups with file/record index as key - - for record_index, df_img_group in tqdm(df_img_groups): - - # access file_record DataSeries - file_record = df_record.iloc[record_index] - - # load image - fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext) - im = cv.imread(fp_im) - im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) - dim = im_resized.shape[:2][::-1] - - # iterate ROIs in this image - for roi_index, df_img in df_img_group.iterrows(): - - # find landmarks - x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h # normalized values - #dim = (file_record.width, file_record.height) # original w,h - bbox = BBox.from_xywh(x, y, w, h).to_dim(dim) - points = 
landmark_detector.landmarks(im_resized, bbox) - points_norm = landmark_detector.normalize(points, dim) - points_str = landmark_detector.to_str(points_norm) - - # display if optioned - if opt_display: - dst = im_resized.copy() - draw_utils.draw_landmarks2D(dst, points) - draw_utils.draw_bbox(dst, bbox) - cv.imshow('', dst) - display_utils.handle_keyboard() - - # add to results for CSV - results.append({'vec': points_str, 'roi_index':roi_index}) - - - # create DataFrame and save to CSV - file_utils.mkdirs(fp_out) - df = pd.DataFrame.from_dict(results) - df.index.name = 'index' - df.to_csv(fp_out) - - # save script - file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file diff --git a/megapixels/commands/cv/face_landmark_3d_68.py b/megapixels/commands/cv/face_landmark_3d_68.py deleted file mode 100644 index a2d14d72..00000000 --- a/megapixels/commands/cv/face_landmark_3d_68.py +++ /dev/null @@ -1,147 +0,0 @@ -""" - -""" - -import click - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - -color_filters = {'color': 1, 'gray': 2, 'all': 3} - -@click.command() -@click.option('-i', '--input', 'opt_fp_in', default=None, - help='Override enum input filename CSV') -@click.option('-o', '--output', 'opt_fp_out', default=None, - help='Override enum output filename CSV') -@click.option('-m', '--media', 'opt_dir_media', default=None, - help='Override enum media directory') -@click.option('--store', 'opt_data_store', - type=cfg.DataStoreVar, - default=click_utils.get_default(types.DataStore.HDD), - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('--dataset', 'opt_dataset', - type=cfg.DatasetVar, - required=True, - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('-d', '--detector', 'opt_detector_type', - type=cfg.FaceLandmark3D_68Var, - default=click_utils.get_default(types.FaceLandmark3D_68.FACE_ALIGNMENT), - help=click_utils.show_help(types.FaceLandmark3D_68)) -@click.option('--size', 'opt_size', - type=(int, int), default=(300, 300), - help='Output image size') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice list of files') -@click.option('-f', '--force', 'opt_force', is_flag=True, - help='Force overwrite file') -@click.option('-d', '--display', 'opt_display', is_flag=True, - help='Display image for debugging') -@click.pass_context -def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type, - opt_size, opt_slice, opt_force, opt_display): - """Generate 3D 68-point landmarks""" - - import sys - import os - from os.path import join - from pathlib import Path - from glob import glob - - from tqdm import tqdm - import numpy as np - import cv2 as cv - import pandas as pd - - from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils - from app.utils import plot_utils - from app.processors import face_landmarks - from app.models.data_store import DataStore - from app.models.bbox import BBox - - # -------------------------------------------------------------------------- - # init here - - log = logger_utils.Logger.getLogger() - log.warn('not normalizing points') - # init filepaths - data_store = DataStore(opt_data_store, opt_dataset) - # set file output path - metadata_type = types.Metadata.FACE_LANDMARK_3D_68 - fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out - if not opt_force and Path(fp_out).exists(): - 
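The `BBox.from_xywh(x, y, w, h).to_dim(dim)` pattern used throughout these commands converts a normalized box into pixel space. A standalone equivalent, assuming the same semantics (x, y, w, h in 0-1; dim as (width, height)):

def xywh_norm_to_xyxy_px(x, y, w, h, dim):
    # normalized x, y, w, h -> pixel-space corner coordinates
    im_w, im_h = dim
    x1, y1 = int(x * im_w), int(y * im_h)
    x2, y2 = int((x + w) * im_w), int((y + h) * im_h)
    return x1, y1, x2, y2

print(xywh_norm_to_xyxy_px(0.25, 0.2, 0.5, 0.5, (300, 300)))  # (75, 60, 225, 210)
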
log.error('File exists. Use "-f / --force" to overwrite') - return - - # init face landmark processors - if opt_detector_type == types.FaceLandmark3D_68.FACE_ALIGNMENT: - # use FaceAlignment 68 point 3D detector - landmark_detector = face_landmarks.FaceAlignment3D_68() - else: - log.error('{} not yet implemented'.format(opt_detector_type.name)) - return - - log.info(f'Using landmark detector: {opt_detector_type.name}') - - # ------------------------------------------------------------------------- - # load data - - fp_record = data_store.metadata(types.Metadata.FILE_RECORD) # file_record.csv - df_record = pd.read_csv(fp_record).set_index('index') - fp_roi = data_store.metadata(types.Metadata.FACE_ROI) # face_roi.csv - df_roi = pd.read_csv(fp_roi).set_index('index') - if opt_slice: - df_roi = df_roi[opt_slice[0]:opt_slice[1]] # slice if you want - df_img_groups = df_roi.groupby('record_index') # groups by image index (load once) - log.debug('processing {:,} groups'.format(len(df_img_groups))) - - # store landmarks in list - results = [] - - # iterate groups with file/record index as key - for record_index, df_img_group in tqdm(df_img_groups): - - # access file record - ds_record = df_record.iloc[record_index] - - # load image - fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext) - im = cv.imread(fp_im) - im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) - - # iterate image group dataframe with roi index as key - for roi_index, df_img in df_img_group.iterrows(): - - # get bbox - x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h - dim = im_resized.shape[:2][::-1] - bbox = BBox.from_xywh(x, y, w, h).to_dim(dim) - - # get landmark points - points = landmark_detector.landmarks(im_resized, bbox) - # NB: these points can't be normalized against the image dims; they are already normalized in 3D model space - #points_norm = landmark_detector.normalize(points, dim) # normalized using 200 - points_flattened = landmark_detector.flatten(points) - - # display to screen if optioned - if opt_display: - draw_utils.draw_landmarks3D(im_resized, points) - draw_utils.draw_bbox(im_resized, bbox) - cv.imshow('', im_resized) - display_utils.handle_keyboard() - - #plot_utils.generate_3d_landmark_anim(points, '/home/adam/Downloads/3d.gif') - - results.append(points_flattened) - - # create DataFrame and save to CSV - file_utils.mkdirs(fp_out) - df = pd.DataFrame.from_dict(results) - df.index.name = 'index' - df.to_csv(fp_out) - - # save script - file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file diff --git a/megapixels/commands/cv/face_pose.py b/megapixels/commands/cv/face_pose.py deleted file mode 100644 index cb7ec56c..00000000 --- a/megapixels/commands/cv/face_pose.py +++ /dev/null @@ -1,164 +0,0 @@ -""" -NB: This only works with the DLIB 68-point landmarks. 
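As the comment above notes, the 3D points are stored unnormalized, one wide row per face. A hedged sketch of what the flatten step produces (column naming is illustrative, not the repo's exact schema):

import numpy as np

def flatten_3d_landmarks(points):
    # (68, 3) array of x, y, z -> one dict usable as a DataFrame row
    row = {}
    for i, (x, y, z) in enumerate(np.asarray(points)):
        row[f'x{i}'], row[f'y{i}'], row[f'z{i}'] = float(x), float(y), float(z)
    return row

row = flatten_3d_landmarks(np.random.rand(68, 3))  # stand-in landmark array
assert len(row) == 68 * 3
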
- -Converts ROIs to pose: yaw, roll, pitch -pitch: looking down or up in yes gesture -roll: tilting head towards shoulder -yaw: twisting head left to right in no gesture - -""" - -""" -TODO -- check compatibility with MTCNN 68 point detector -- improve accuracy by using MTCNN 5-point -- refer to https://github.com/jerryhouuu/Face-Yaw-Roll-Pitch-from-Pose-Estimation-using-OpenCV/ -""" - -import click - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - -@click.command() -@click.option('-i', '--input', 'opt_fp_in', default=None, - help='Override enum input filename CSV') -@click.option('-o', '--output', 'opt_fp_out', default=None, - help='Override enum output filename CSV') -@click.option('-m', '--media', 'opt_dir_media', default=None, - help='Override enum media directory') -@click.option('--store', 'opt_data_store', - type=cfg.DataStoreVar, - default=click_utils.get_default(types.DataStore.HDD), - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('--dataset', 'opt_dataset', - type=cfg.DatasetVar, - required=True, - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('--size', 'opt_size', - type=(int, int), default=(300, 300), - help='Output image size') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice list of files') -@click.option('-f', '--force', 'opt_force', is_flag=True, - help='Force overwrite file') -@click.option('-d', '--display', 'opt_display', is_flag=True, - help='Display image for debugging') -@click.pass_context -def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size, - opt_slice, opt_force, opt_display): - """Converts ROIs to pose: roll, yaw, pitch""" - - import sys - import os - from os.path import join - from pathlib import Path - from glob import glob - - from tqdm import tqdm - import numpy as np - import dlib # must keep a local reference for dlib - import cv2 as cv - import pandas as pd - - from app.models.bbox import BBox - from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils - from app.processors.face_landmarks import Dlib2D_68 - from app.processors.face_pose import FacePoseDLIB - from app.models.data_store import DataStore - - # ------------------------------------------------- - # init here - - log = logger_utils.Logger.getLogger() - - # set data_store - data_store = DataStore(opt_data_store, opt_dataset) - - # get filepath out - fp_out = data_store.metadata(types.Metadata.FACE_POSE) if opt_fp_out is None else opt_fp_out - if not opt_force and Path(fp_out).exists(): - log.error('File exists. 
Use "-f / --force" to overwite') - return - - # init face processors - face_pose = FacePoseDLIB() - face_landmarks = Dlib2D_68() - - # ------------------------------------------------- - # load data - - fp_record = data_store.metadata(types.Metadata.FILE_RECORD) - df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index') - # load ROI data - fp_roi = data_store.metadata(types.Metadata.FACE_ROI) - df_roi = pd.read_csv(fp_roi).set_index('index') - # slice if you want - if opt_slice: - df_roi = df_roi[opt_slice[0]:opt_slice[1]] - # group by image index (speedup if multiple faces per image) - df_img_groups = df_roi.groupby('record_index') - log.debug('processing {:,} groups'.format(len(df_img_groups))) - - # store poses and convert to DataFrame - results = [] - - # ------------------------------------------------- - # iterate groups with file/record index as key - for record_index, df_img_group in tqdm(df_img_groups): - - # access the file_record - file_record = df_record.iloc[record_index] # pands.DataSeries - - # load image - fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext) - im = cv.imread(fp_im) - im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) - - # iterate image group dataframe with roi index as key - for roi_index, df_img in df_img_group.iterrows(): - - # get bbox - x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h - #dim = (file_record.width, file_record.height) - dim = im_resized.shape[:2][::-1] - bbox_norm = BBox.from_xywh(x, y, w, h) - bbox_dim = bbox_norm.to_dim(dim) - - # get pose - landmarks = face_landmarks.landmarks(im_resized, bbox_norm) - pose_data = face_pose.pose(landmarks, dim) - #pose_degrees = pose_data['degrees'] # only keep the degrees data - #pose_degrees['points_nose'] = pose_data - - # draw landmarks if optioned - if opt_display: - draw_utils.draw_pose(im_resized, pose_data['point_nose'], pose_data['points']) - draw_utils.draw_degrees(im_resized, pose_data) - cv.imshow('', im_resized) - display_utils.handle_keyboard() - - # add image index and append to result CSV data - pose_data['roi_index'] = roi_index - for k, v in pose_data['points'].items(): - pose_data[f'point_{k}_x'] = v[0] / dim[0] - pose_data[f'point_{k}_y'] = v[1] / dim[1] - - # rearrange data structure for DataFrame - pose_data.pop('points') - pose_data['point_nose_x'] = pose_data['point_nose'][0] / dim[0] - pose_data['point_nose_y'] = pose_data['point_nose'][1] / dim[1] - pose_data.pop('point_nose') - results.append(pose_data) - - # create DataFrame and save to CSV - file_utils.mkdirs(fp_out) - df = pd.DataFrame.from_dict(results) - df.index.name = 'index' - df.to_csv(fp_out) - - # save script - file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file diff --git a/megapixels/commands/cv/face_roi.py b/megapixels/commands/cv/face_roi.py deleted file mode 100644 index e83b0f61..00000000 --- a/megapixels/commands/cv/face_roi.py +++ /dev/null @@ -1,187 +0,0 @@ -""" -Crop images to prepare for training -""" - -import click -# from PIL import Image, ImageOps, ImageFilter, ImageDraw - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - -color_filters = {'color': 1, 'gray': 2, 'all': 3} - -@click.command() -@click.option('-i', '--input', 'opt_fp_in', default=None, - help='Override enum input filename CSV') -@click.option('-o', '--output', 'opt_fp_out', default=None, - help='Override enum output filename CSV') -@click.option('-m', '--media', 
'opt_dir_media', default=None, - help='Override enum media directory') -@click.option('--store', 'opt_data_store', - type=cfg.DataStoreVar, - default=click_utils.get_default(types.DataStore.HDD), - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('--dataset', 'opt_dataset', - type=cfg.DatasetVar, - required=True, - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('--size', 'opt_size', - type=(int, int), default=(480, 480), - help='Output image size') -@click.option('-d', '--detector', 'opt_detector_type', - type=cfg.FaceDetectNetVar, - default=click_utils.get_default(types.FaceDetectNet.CVDNN), - help=click_utils.show_help(types.FaceDetectNet)) -@click.option('-g', '--gpu', 'opt_gpu', default=0, - help='GPU index') -@click.option('--conf', 'opt_conf_thresh', default=0.85, type=click.FloatRange(0,1), - help='Confidence minimum threshold') -@click.option('-p', '--pyramids', 'opt_pyramids', default=0, type=click.IntRange(0,4), - help='Number pyramids to upscale for DLIB detectors') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice list of files') -@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False, - help='Display detections to debug') -@click.option('-f', '--force', 'opt_force', is_flag=True, - help='Force overwrite file') -@click.option('--color', 'opt_color_filter', - type=click.Choice(color_filters.keys()), default='all', - help='Filter to keep color or grayscale images (color = keep color') -@click.option('--keep', 'opt_largest', type=click.Choice(['largest', 'all']), default='all', - help='Only keep largest face') -@click.option('--zone', 'opt_zone', default=(0.0, 0.0), type=(float, float), - help='Face center must be located within zone region (0.5 = half width/height)') -@click.pass_context -def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset, opt_size, opt_detector_type, - opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_force, opt_color_filter, - opt_largest, opt_zone): - """Converts frames with faces to CSV of ROIs""" - - import sys - import os - from os.path import join - from pathlib import Path - from glob import glob - - from tqdm import tqdm - import numpy as np - import dlib # must keep a local reference for dlib - import cv2 as cv - import pandas as pd - - from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils - from app.processors import face_detector - from app.models.data_store import DataStore - - # ------------------------------------------------- - # init here - - log = logger_utils.Logger.getLogger() - - # set data_store - data_store = DataStore(opt_data_store, opt_dataset) - - # get filepath out - fp_out = data_store.metadata(types.Metadata.FACE_ROI) if opt_fp_out is None else opt_fp_out - if not opt_force and Path(fp_out).exists(): - log.error('File exists. 
Use "-f / --force" to overwite') - return - - # set detector - if opt_detector_type == types.FaceDetectNet.CVDNN: - detector = face_detector.DetectorCVDNN() - elif opt_detector_type == types.FaceDetectNet.DLIB_CNN: - detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) - elif opt_detector_type == types.FaceDetectNet.DLIB_HOG: - detector = face_detector.DetectorDLIBHOG() - elif opt_detector_type == types.FaceDetectNet.MTCNN_TF: - detector = face_detector.DetectorMTCNN_TF(gpu=opt_gpu) - elif opt_detector_type == types.FaceDetectNet.HAAR: - log.error('{} not yet implemented'.format(opt_detector_type.name)) - return - - - # get list of files to process - fp_record = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in - df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index') - if opt_slice: - df_record = df_record[opt_slice[0]:opt_slice[1]] - log.debug('processing {:,} files'.format(len(df_record))) - - # filter out grayscale - color_filter = color_filters[opt_color_filter] - # set largest flag, to keep all or only largest - opt_largest = (opt_largest == 'largest') - - data = [] - skipped_files = [] - processed_files = [] - - for df_record in tqdm(df_record.itertuples(), total=len(df_record)): - fp_im = data_store.face(str(df_record.subdir), str(df_record.fn), str(df_record.ext)) - try: - im = cv.imread(fp_im) - im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) - except Exception as e: - log.debug(f'could not read: {fp_im}') - return - # filter out color or grayscale iamges - if color_filter != color_filters['all']: - try: - is_gray = im_utils.is_grayscale(im) - if is_gray and color_filter != color_filters['gray']: - log.debug('Skipping grayscale image: {}'.format(fp_im)) - continue - except Exception as e: - log.error('Could not check grayscale: {}'.format(fp_im)) - continue - - try: - bboxes_norm = detector.detect(im_resized, pyramids=opt_pyramids, largest=opt_largest, - zone=opt_zone, conf_thresh=opt_conf_thresh) - except Exception as e: - log.error('could not detect: {}'.format(fp_im)) - log.error('{}'.format(e)) - continue - - if len(bboxes_norm) == 0: - skipped_files.append(fp_im) - log.warn(f'no faces in: {fp_im}') - log.warn(f'skipped: {len(skipped_files)}. 
found:{len(processed_files)} files') - else: - processed_files.append(fp_im) - for bbox in bboxes_norm: - roi = { - 'record_index': int(df_record.Index), - 'x': bbox.x, - 'y': bbox.y, - 'w': bbox.w, - 'h': bbox.h - } - data.append(roi) - - # if display optined - if opt_display and len(bboxes_norm): - # draw each box - for bbox_norm in bboxes_norm: - dim = im_resized.shape[:2][::-1] - bbox_dim = bbox.to_dim(dim) - if dim[0] > 1000: - im_resized = im_utils.resize(im_resized, width=1000) - im_resized = draw_utils.draw_bbox(im_resized, bbox_norm) - - # display and wait - cv.imshow('', im_resized) - display_utils.handle_keyboard() - - # create DataFrame and save to CSV - file_utils.mkdirs(fp_out) - df = pd.DataFrame.from_dict(data) - df.index.name = 'index' - df.to_csv(fp_out) - - # save script - file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file diff --git a/megapixels/commands/cv/face_vector.py b/megapixels/commands/cv/face_vector.py deleted file mode 100644 index cb155d08..00000000 --- a/megapixels/commands/cv/face_vector.py +++ /dev/null @@ -1,133 +0,0 @@ -""" -Converts ROIs to face vector -NB: the VGG Face2 extractor should be used with MTCNN ROIs (not square) - the DLIB face extractor should be used with DLIB ROIs (square) -see https://github.com/ox-vgg/vgg_face2 for TAR@FAR -""" - -import click - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - -@click.command() -@click.option('-o', '--output', 'opt_fp_out', default=None, - help='Override enum output filename CSV') -@click.option('-m', '--media', 'opt_dir_media', default=None, - help='Override enum media directory') -@click.option('--store', 'opt_data_store', - type=cfg.DataStoreVar, - default=click_utils.get_default(types.DataStore.HDD), - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('--dataset', 'opt_dataset', - type=cfg.DatasetVar, - required=True, - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('--size', 'opt_size', - type=(int, int), default=cfg.DEFAULT_SIZE_FACE_DETECT, - help='Output image size') -@click.option('-e', '--extractor', 'opt_extractor', - default=click_utils.get_default(types.FaceExtractor.VGG), - type=cfg.FaceExtractorVar, - help='Type of extractor framework/network to use') -@click.option('-j', '--jitters', 'opt_jitters', default=cfg.DLIB_FACEREC_JITTERS, - help='Number of jitters (only for dlib') -@click.option('-p', '--padding', 'opt_padding', default=cfg.FACEREC_PADDING, - help='Percentage ROI padding') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice list of files') -@click.option('-f', '--force', 'opt_force', is_flag=True, - help='Force overwrite file') -@click.option('-g', '--gpu', 'opt_gpu', default=0, - help='GPU index') -@click.pass_context -def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size, - opt_extractor, opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding): - """Converts face ROIs to vectors""" - - import sys - import os - from os.path import join - from pathlib import Path - from glob import glob - - from tqdm import tqdm - import numpy as np - import dlib # must keep a local reference for dlib - import cv2 as cv - import pandas as pd - - from app.models.bbox import BBox - from app.models.data_store import DataStore - from app.utils import logger_utils, file_utils, im_utils - from app.processors import face_extractor - - - # 
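The `--zone` option above gates detections by where they sit in the frame. Its exact semantics live inside the detector, but a plausible reading (assumed, not confirmed by this diff) is: keep a face only if its center falls within a centered region covering the given fraction of the width and height:

def in_zone(bbox_norm, zone):
    # bbox_norm: dict of normalized x, y, w, h; zone: (frac_w, frac_h)
    zw, zh = zone
    cx = bbox_norm['x'] + bbox_norm['w'] / 2
    cy = bbox_norm['y'] + bbox_norm['h'] / 2
    return abs(cx - 0.5) <= zw / 2 and abs(cy - 0.5) <= zh / 2

print(in_zone({'x': 0.4, 'y': 0.4, 'w': 0.2, 'h': 0.2}, (0.5, 0.5)))  # True
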
------------------------------------------------- - # init here - - log = logger_utils.Logger.getLogger() - # set data_store - data_store = DataStore(opt_data_store, opt_dataset) - - # get filepath out - fp_out = data_store.metadata(types.Metadata.FACE_VECTOR) if opt_fp_out is None else opt_fp_out - if not opt_force and Path(fp_out).exists(): - log.error('File exists. Use "-f / --force" to overwite') - return - - # init face processors - if opt_extractor == types.FaceExtractor.DLIB: - log.debug('set dlib') - extractor = face_extractor.ExtractorDLIB(gpu=opt_gpu, jitters=opt_jitters) - elif opt_extractor == types.FaceExtractor.VGG: - extractor = face_extractor.ExtractorVGG() - - # load data - fp_record = data_store.metadata(types.Metadata.FILE_RECORD) - df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index') - fp_roi = data_store.metadata(types.Metadata.FACE_ROI) - df_roi = pd.read_csv(fp_roi).set_index('index') - - if opt_slice: - df_roi = df_roi[opt_slice[0]:opt_slice[1]] - - # ------------------------------------------------- - # process images - - df_img_groups = df_roi.groupby('record_index') - log.debug('processing {:,} groups'.format(len(df_img_groups))) - - vecs = [] - for record_index, df_img_group in tqdm(df_img_groups): - # make fp - ds_record = df_record.iloc[record_index] - fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext) - im = cv.imread(fp_im) - im = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) - for roi_index, df_img in df_img_group.iterrows(): - # get bbox - x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h - dim = (ds_record.width, ds_record.height) - # get face vector - bbox = BBox.from_xywh(x, y, w, h) # norm - # compute vec - vec = extractor.extract(im, bbox) # use normalized BBox - vec_str = extractor.to_str(vec) - vec_obj = {'vec':vec_str, 'roi_index': roi_index, 'record_index':record_index} - vecs.append(vec_obj) - - # ------------------------------------------------- - # save data - - # create DataFrame and save to CSV - df = pd.DataFrame.from_dict(vecs) - df.index.name = 'index' - file_utils.mkdirs(fp_out) - df.to_csv(fp_out) - - # save script - file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file diff --git a/megapixels/commands/cv/mirror.py b/megapixels/commands/cv/mirror.py deleted file mode 100644 index 9ca1cac7..00000000 --- a/megapixels/commands/cv/mirror.py +++ /dev/null @@ -1,57 +0,0 @@ -""" -Crop images to prepare for training -""" - -import click -import cv2 as cv -from PIL import Image, ImageOps, ImageFilter - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - - -@click.command() -@click.option('-i', '--input', 'opt_dir_in', required=True, - help='Input directory') -@click.option('-o', '--output', 'opt_dir_out', required=True, - help='Output directory') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice the input list') -@click.pass_context -def cli(ctx, opt_dir_in, opt_dir_out, opt_slice): - """Mirror augment image directory""" - - import os - from os.path import join - from pathlib import Path - from glob import glob - from tqdm import tqdm - - from app.utils import logger_utils, file_utils, im_utils - - # ------------------------------------------------- - # init - - log = logger_utils.Logger.getLogger() - - # ------------------------------------------------- - # process here - - # get list of files to process - fp_ims = glob(join(opt_dir_in, 
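The extractor's `to_str(vec)` call above serializes each embedding into a single CSV cell. The repo's exact delimiter and precision aren't shown in this diff; a hedged round-trip sketch:

import numpy as np

def vec_to_str(vec, precision=8):
    return ','.join(f'{v:.{precision}f}' for v in np.asarray(vec).ravel())

def str_to_vec(s):
    return np.array([float(v) for v in s.split(',')], dtype=np.float32)

vec = np.random.rand(128)  # stand-in embedding
assert np.allclose(str_to_vec(vec_to_str(vec)), vec, atol=1e-6)
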
'*.jpg')) - fp_ims += glob(join(opt_dir_in, '*.png')) - - if opt_slice: - fp_ims = fp_ims[opt_slice[0]:opt_slice[1]] - log.info('processing {:,} files'.format(len(fp_ims))) - - # ensure output dir exists - file_utils.mkdirs(opt_dir_out) - - # mirror and save images - for fp_im in tqdm(fp_ims): - im = ImageOps.mirror(Image.open(fp_im)) - fpp_im = Path(fp_im) - fp_out = join(opt_dir_out, '{}_mirror{}'.format(fpp_im.stem, fpp_im.suffix)) - im.save(fp_out) \ No newline at end of file diff --git a/megapixels/commands/cv/resize.py b/megapixels/commands/cv/resize.py deleted file mode 100644 index 7409ee6f..00000000 --- a/megapixels/commands/cv/resize.py +++ /dev/null @@ -1,150 +0,0 @@ -""" -Resize images to prepare for training -""" - -import click -import cv2 as cv -from PIL import Image, ImageOps, ImageFilter - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - -""" -Filter Q-Down Q-Up Speed -NEAREST ⭐⭐⭐⭐⭐ -BOX ⭐ ⭐⭐⭐⭐ -BILINEAR ⭐ ⭐ ⭐⭐⭐ -HAMMING ⭐⭐ ⭐⭐⭐ -BICUBIC ⭐⭐⭐ ⭐⭐⭐ ⭐⭐ -LANCZOS ⭐⭐⭐⭐ ⭐⭐⭐⭐ ⭐ -""" -methods = { - 'lanczos': Image.LANCZOS, - 'bicubic': Image.BICUBIC, - 'hamming': Image.HAMMING, - 'bilinear': Image.BILINEAR, - 'box': Image.BOX, - 'nearest': Image.NEAREST - } -centerings = { - 'tl': (0.0, 0.0), - 'tc': (0.5, 0.0), - 'tr': (1.0, 0.0), - 'lc': (0.0, 0.5), - 'cc': (0.5, 0.5), - 'rc': (1.0, 0.5), - 'bl': (0.0, 1.0), - 'bc': (0.5, 1.0), - 'br': (1.0, 1.0) -} - -@click.command() -@click.option('-i', '--input', 'opt_dir_in', required=True, - help='Input directory') -@click.option('-o', '--output', 'opt_dir_out', required=True, - help='Output directory') -@click.option('-e', '--ext', 'opt_glob_ext', - default='png', type=click.Choice(['jpg', 'png']), - help='File glob ext') -@click.option('--size', 'opt_size', - type=(int, int), default=(256, 256), - help='Max output size') -@click.option('--method', 'opt_scale_method', - type=click.Choice(methods.keys()), - default='lanczos', - help='Scaling method to use') -@click.option('--equalize', 'opt_equalize', is_flag=True, - help='Equalize histogram') -@click.option('--sharpen', 'opt_sharpen', is_flag=True, - help='Unsharp mask') -@click.option('--center', 'opt_center', default='cc', type=click.Choice(centerings.keys()), - help='Crop focal point') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice the input list') -@click.option('-t', '--threads', 'opt_threads', default=8, - help='Number of threads') -@click.pass_context -def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method, - opt_equalize, opt_sharpen, opt_center, opt_slice, opt_threads): - """Resize images""" - - import os - from os.path import join - from pathlib import Path - from glob import glob - from tqdm import tqdm - from multiprocessing.dummy import Pool as ThreadPool - from functools import partial - - from app.utils import logger_utils, file_utils, im_utils - - # ------------------------------------------------- - # init - - log = logger_utils.Logger.getLogger() - - - # ------------------------------------------------- - # process here - - def pool_resize(fp_im, opt_size, scale_method): - # Threaded image resize function - try: - pbar.update(1) - try: - im = Image.open(fp_im).convert('RGB') - im.verify() - except Exception as e: - log.warn('Could not open: {}'.format(fp_im)) - log.error(e) - return False - - #im = ImageOps.fit(im, opt_size, method=scale_method, centering=centering) - - if opt_equalize: - im_np = im_utils.pil2np(im) - im_np_eq = eq_hist_yuv(im_np) - im_np = 
cv.addWeighted(im_np_eq, 0.35, im_np, 0.65, 0) - im = im_utils.np2pil(im_np) - - if opt_sharpen: - im = im.filter(ImageFilter.UnsharpMask) - - fp_out = join(opt_dir_out, Path(fp_im).name) - im.save(fp_out) - return True - except: - return False - - #centering = centerings[opt_center] - #scale_method = methods[opt_scale_method] - - # get list of files to process - fp_ims = glob(join(opt_dir_in, '*.{}'.format(opt_glob_ext))) - if opt_slice: - fp_ims = fp_ims[opt_slice[0]:opt_slice[1]] - log.info('processing {:,} files'.format(len(fp_ims))) - - - # ensure output dir exists - file_utils.mkdirs(opt_dir_out) - - # setup multithreading - pbar = tqdm(total=len(fp_ims)) - #pool_resize = partial(pool_resize, opt_size=opt_size, scale_method=scale_method, centering=centering) - pool_resize = partial(pool_resize, opt_size=opt_size) - #result_list = pool.map(prod_x, data_list) - pool = ThreadPool(opt_threads) - with tqdm(total=len(fp_ims)) as pbar: - results = pool.map(pool_resize, fp_ims) - pbar.close() - - log.info('Resized: {} / {} images'.format(results.count(True), len(fp_ims))) - - - -def eq_hist_yuv(im): - im_yuv = cv.cvtColor(im, cv.COLOR_BGR2YUV) - im_yuv[:,:,0] = cv.equalizeHist(im_yuv[:,:,0]) - return cv.cvtColor(im_yuv, cv.COLOR_YUV2BGR) diff --git a/megapixels/commands/cv/resize_dataset.py b/megapixels/commands/cv/resize_dataset.py deleted file mode 100644 index 3a6ec15f..00000000 --- a/megapixels/commands/cv/resize_dataset.py +++ /dev/null @@ -1,149 +0,0 @@ -""" -Crop images to prepare for training -""" - -import click -import cv2 as cv -from PIL import Image, ImageOps, ImageFilter - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - -cv_resize_algos = { - 'area': cv.INTER_AREA, - 'lanco': cv.INTER_LANCZOS4, - 'linear': cv.INTER_LINEAR, - 'linear_exact': cv.INTER_LINEAR_EXACT, - 'nearest': cv.INTER_NEAREST -} -""" -Filter Q-Down Q-Up Speed -NEAREST ⭐⭐⭐⭐⭐ -BOX ⭐ ⭐⭐⭐⭐ -BILINEAR ⭐ ⭐ ⭐⭐⭐ -HAMMING ⭐⭐ ⭐⭐⭐ -BICUBIC ⭐⭐⭐ ⭐⭐⭐ ⭐⭐ -LANCZOS ⭐⭐⭐⭐ ⭐⭐⭐⭐ ⭐ -""" -pil_resize_algos = { - 'antialias': Image.ANTIALIAS, - 'lanczos': Image.LANCZOS, - 'bicubic': Image.BICUBIC, - 'hamming': Image.HAMMING, - 'bileaner': Image.BILINEAR, - 'box': Image.BOX, - 'nearest': Image.NEAREST - } - -@click.command() -@click.option('--dataset', 'opt_dataset', - type=cfg.DatasetVar, - required=True, - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('--store', 'opt_data_store', - type=cfg.DataStoreVar, - default=click_utils.get_default(types.DataStore.HDD), - show_default=True, - help=click_utils.show_help(types.Dataset)) -@click.option('-o', '--output', 'opt_dir_out', required=True, - help='Output directory') -@click.option('-e', '--ext', 'opt_glob_ext', - default='png', type=click.Choice(['jpg', 'png']), - help='File glob ext') -@click.option('--size', 'opt_size', - type=(int, int), default=(256, 256), - help='Output image size max (w,h)') -@click.option('--interp', 'opt_interp_algo', - type=click.Choice(pil_resize_algos.keys()), - default='bicubic', - help='Interpolation resizing algorithms') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice the input list') -@click.option('-t', '--threads', 'opt_threads', default=8, - help='Number of threads') -@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False, - help='Use glob recursion (slower)') -@click.pass_context -def cli(ctx, opt_dataset, opt_data_store, opt_dir_out, opt_glob_ext, opt_size, opt_interp_algo, - 
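The equalize branch above blends a YUV-equalized copy back into the original at 35/65 to soften the effect. A standalone demo of the same steps with a stand-in image:

import numpy as np
import cv2 as cv

def eq_hist_yuv(im):
    # equalize only the luma (Y) channel so colors are not distorted
    im_yuv = cv.cvtColor(im, cv.COLOR_BGR2YUV)
    im_yuv[:, :, 0] = cv.equalizeHist(im_yuv[:, :, 0])
    return cv.cvtColor(im_yuv, cv.COLOR_YUV2BGR)

im = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)  # stand-in image
im_blend = cv.addWeighted(eq_hist_yuv(im), 0.35, im, 0.65, 0)
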
opt_slice, opt_threads, opt_recursive): - """Resize dataset images""" - - import os - from os.path import join - from pathlib import Path - from glob import glob - from tqdm import tqdm - from multiprocessing.dummy import Pool as ThreadPool - from functools import partial - import pandas as pd - import numpy as np - - from app.utils import logger_utils, file_utils, im_utils - from app.models.data_store import DataStore - - # ------------------------------------------------- - # init - - log = logger_utils.Logger.getLogger() - - - # ------------------------------------------------- - # process here - - def pool_resize(fp_in, dir_in, dir_out, im_size, interp_algo): - # Threaded image resize function - pbar.update(1) - try: - im = Image.open(fp_in).convert('RGB') - im.verify() # throws error if image is corrupt - im.thumbnail(im_size, interp_algo) - fp_out = fp_in.replace(dir_in, dir_out) - file_utils.mkdirs(fp_out) - im.save(fp_out, quality=100) - except Exception as e: - log.warn(f'Could not open: {fp_in}, Error: {e}') - return False - return True - - - data_store = DataStore(opt_data_store, opt_dataset) - fp_records = data_store.metadata(types.Metadata.FILE_RECORD) - df_records = pd.read_csv(fp_records, dtype=cfg.FILE_RECORD_DTYPES).set_index('index') - dir_in = data_store.media_images_original() - - # get list of files to process - #fp_ims = file_utils.glob_multi(opt_dir_in, ['jpg', 'png'], recursive=opt_recursive) - fp_ims = [] - for ds_record in df_records.itertuples(): - fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext) - fp_ims.append(fp_im) - - if opt_slice: - fp_ims = fp_ims[opt_slice[0]:opt_slice[1]] - if not fp_ims: - log.error('No images. Try with "--recursive"') - return - log.info(f'processing {len(fp_ims):,} images') - - # algorithm to use for resizing - interp_algo = pil_resize_algos[opt_interp_algo] - log.info(f'using {interp_algo} for interpoloation') - - # ensure output dir exists - file_utils.mkdirs(opt_dir_out) - - # setup multithreading - pbar = tqdm(total=len(fp_ims)) - # fixed arguments for pool function - map_pool_resize = partial(pool_resize, dir_in=dir_in, dir_out=opt_dir_out, im_size=opt_size, interp_algo=interp_algo) - #result_list = pool.map(prod_x, data_list) # simple - pool = ThreadPool(opt_threads) - # start multithreading - with tqdm(total=len(fp_ims)) as pbar: - results = pool.map(map_pool_resize, fp_ims) - # end multithreading - pbar.close() - - log.info(f'Resized: {results.count(True)} / {len(fp_ims)} images') \ No newline at end of file diff --git a/megapixels/commands/cv/videos_to_frames.py b/megapixels/commands/cv/videos_to_frames.py deleted file mode 100644 index 0b56c46a..00000000 --- a/megapixels/commands/cv/videos_to_frames.py +++ /dev/null @@ -1,73 +0,0 @@ -from glob import glob -import os -from os.path import join -from pathlib import Path - -import click - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg -from app.utils import logger_utils - -import dlib -import pandas as pd -from PIL import Image, ImageOps, ImageFilter -from app.utils import file_utils, im_utils - - -log = logger_utils.Logger.getLogger() - -@click.command() -@click.option('-i', '--input', 'opt_fp_in', required=True, - help='Input directory') -@click.option('-o', '--output', 'opt_fp_out', required=True, - help='Output directory') -@click.option('--size', 'opt_size', default=(320, 240), - help='Inference size for face detection' ) -@click.option('--interval', 'opt_frame_interval', default=20, - 
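The ThreadPool plus functools.partial pattern above is how the fixed options get bound onto the per-file worker before mapping it over the file list. A minimal sketch of the same pattern (the worker body is a stand-in for the real open/thumbnail/save work):

from functools import partial
from multiprocessing.dummy import Pool as ThreadPool  # thread-backed Pool

def resize_one(fp, size):
    return (fp, size)  # stand-in for the real resize work

files = [f'im_{i}.jpg' for i in range(10)]
with ThreadPool(4) as pool:
    results = pool.map(partial(resize_one, size=(256, 256)), files)
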
help='Number of frames before saving next face') -@click.pass_context -def cli(ctx, opt_fp_in, opt_fp_out, opt_size, opt_frame_interval): - """Converts videos to frames with faces""" - - # ------------------------------------------------- - # process - - from tqdm import tqdm - import cv2 as cv - from tqdm import tqdm - from app.processors import face_detector - - detector = face_detector.DetectorDLIBCNN() - - # get file list - fp_videos = glob(join(opt_fp_in, '*.mp4')) - fp_videos += glob(join(opt_fp_in, '*.webm')) - fp_videos += glob(join(opt_fp_in, '*.mkv')) - - frame_interval_count = 0 - frame_count = 0 - - file_utils.mkdirs(opt_fp_out) - - for fp_video in tqdm(fp_videos): - - video = cv.VideoCapture(fp_video) - - while video.isOpened(): - res, frame = video.read() - if not res: - break - - frame_count += 1 # for naming - frame_interval_count += 1 # for interval - - bboxes = detector.detect(frame, opt_size=opt_size, opt_pyramids=0) - if len(bboxes) > 0 and frame_interval_count >= opt_frame_interval: - # save frame - fname = file_utils.zpad(frame_count) - fp_frame = join(opt_fp_out, '{}_{}.jpg'.format(Path(fp_video).stem, fname)) - cv.imwrite(fp_frame, frame) - frame_interval_count = 0 - diff --git a/megapixels/commands/datasets/preproc_wiki_imdb.py b/megapixels/commands/datasets/preproc_wiki_imdb.py new file mode 100644 index 00000000..66680ed0 --- /dev/null +++ b/megapixels/commands/datasets/preproc_wiki_imdb.py @@ -0,0 +1,205 @@ +from glob import glob +import os +from os.path import join +from pathlib import Path + +import click + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg +from app.utils import logger_utils + +import dlib +import pandas as pd +from PIL import Image, ImageOps, ImageFilter +from app.utils import file_utils, im_utils + + +log = logger_utils.Logger.getLogger() + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', required=True, + help='Input directory') +@click.option('-o', '--output', 'opt_fp_out', + help='Output directory') +@click.option('--videos', 'opt_dir_videos', + help='Output directory') +@click.option('--action', 'opt_action', + type=click.Choice(['info', 'faces', 'rename', 'download', 'metadata', 'split_frames']), + default='info', + help='Command action') +@click.pass_context +def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_videos, opt_action): + """YTMU utils""" + + + from tqdm import tqdm + + # ------------------------------------------------- + # process + + if opt_action == 'metadata': + # downloads video metadata with ytdl + handle_metadata(opt_fp_in, opt_fp_out) + elif opt_action == 'download': + # downloads video files with ytdl + handle_download(opt_fp_in, opt_fp_out) + elif opt_action == 'info': + # converts original data file to clean CSV + handle_info() + elif opt_action == 'rename': + # rename the videos to video ID + handle_rename(opt_fp_in, opt_fp_out, opt_dir_videos) + elif opt_action == 'split_frames': + # rename the videos to video ID + handle_split_frames(opt_fp_in, opt_fp_out, opt_dir_videos) + + + + +# ---------------------------------------------------- +# handlers + +def handle_split_frames(fp_in, dir_out, dir_videos): + if not dir_out or not dir_videos: + log.error('-o/--output and --videos required') + return + import cv2 as cv + from tqdm import tqdm + from app.processors import face_detector + detector = face_detector.DetectorDLIBCNN() + + # get file list + fp_videos = glob(join(dir_videos, '*.mp4')) + fp_videos += glob(join(dir_videos, '*.webm')) + fp_videos += 
glob(join(dir_videos, '*.mkv')) + face_interval = 30 + frame_interval_count = 0 + frame_count = 0 + + file_utils.mkdirs(dir_out) + + for fp_video in tqdm(fp_videos): + # log.debug('opening: {}'.format(fp_video)) + video = cv.VideoCapture(fp_video) + while video.isOpened(): + res, frame = video.read() + if not res: + break + + frame_count += 1 # for naming + frame_interval_count += 1 # for interval + bboxes = detector.detect(frame, opt_size=(320, 240), opt_pyramids=0) + if len(bboxes) > 0 and frame_interval_count >= face_interval: + # save frame + fp_frame = join(dir_out, '{}_{}.jpg'.format(Path(fp_video).stem, file_utils.zpad(frame_count))) + cv.imwrite(fp_frame, frame) + frame_interval_count = 0 + + +def handle_metadata(fp_in, fp_out): + + keys = ['description', 'average_rating', 'dislike_count', 'categories', + 'thumbnail', 'title', 'upload_date', 'uploader_url', 'uploader_id', + 'fps', 'height', 'width', 'like_count', 'license', 'tags'] + + import youtube_dl + + ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s.%(ext)s'}) + + df = pd.read_csv(fp_in) + data_exp = [] + + for i, row in df.iterrows(): + video_data = {'url': row['url'], 'id': row['id']} + try: + with ydl: + url = 'http://www.youtube.com/watch?v={}'.format(row['id']) + result = ydl.extract_info(url, download=False) + video = result['entries'][0] if 'entries' in result else result + for k in keys: + val = video[k] + if k == 'title': + log.debug(val) + if type(val) == list: + val = '; '.join(val) + if type(val) == str: + video_data[k] = str(val).replace(',',';') + # log.debug('video_data: {}'.format(video_data)) + except Exception as e: + log.warn('video unavailable: {}'.format(row['url'])) + log.error(e) + continue + data_exp.append(video_data) + + df_exp = pd.DataFrame.from_dict(data_exp) + df_exp.to_csv(fp_out) + + +def handle_download(fp_in, dir_out): + import youtube_dl + df = pd.read_csv(fp_in) + fp_videos = glob(join(dir_out, '*.mp4')) + fp_videos += glob(join(dir_out, '*.webm')) + fp_videos += glob(join(dir_out, '*.mkv')) + + ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s.%(ext)s'}) + + for i, row in df.iterrows(): + vid = row['id'] + found = False + for fp_video in fp_videos: + if vid in fp_video: + log.debug('skip: {}'.format(vid)) + found = True + if not found: + try: + with ydl: + ydl.download(['http://www.youtube.com/watch?v={}'.format(vid)]) + except: + log.error('could not dl: {}'.format(vid)) + + +def handle_info(fp_in, fp_out): + if not fp_out: + log.error('--output required') + return + urls = file_utils.load_text(fp_in) + videos = [] + for url in urls: + splits = url.split('v=') + try: + vid = splits[1] + vid = vid.split('&')[0] + videos.append({'url': url, 'id': vid}) + except: + log.warn('no video id for {}'.format(url)) + # convert to df + df = pd.DataFrame.from_dict(videos) + df.to_csv(fp_out) + + +def handle_rename(fp_in, fp_out, dir_videos): + import shutil + + if not dir_videos: + log.error('--videos required') + return + + fp_videos = glob(join(dir_videos, '*.mp4')) + fp_videos += glob(join(dir_videos, '*.webm')) + fp_videos += glob(join(dir_videos, '*.mkv')) + + df = pd.read_csv(fp_in) + + for i, row in df.iterrows(): + vid = row['id'] + fp_videos_copy = fp_videos.copy() + for fp_video in fp_videos: + if vid in fp_video: + dst = join(dir_videos, '{}{}'.format(vid, Path(fp_video).suffix)) + shutil.move(fp_video, dst) + log.debug('move {} to {}'.format(fp_video, dst)) + fp_videos.remove(fp_video) + break \ No newline at end of file diff --git a/megapixels/commands/demo/face_search.py 
b/megapixels/commands/demo/face_search.py index f551cafd..4c7036f4 100644 --- a/megapixels/commands/demo/face_search.py +++ b/megapixels/commands/demo/face_search.py @@ -53,7 +53,7 @@ def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_results, opt_gpu): dataset.load_metadata(types.Metadata.FILE_RECORD) dataset.load_metadata(types.Metadata.FACE_VECTOR) dataset.load_metadata(types.Metadata.FACE_ROI) - # dataset.load_metadata(types.Metadata.IDENTITY) + dataset.load_metadata(types.Metadata.IDENTITY) # init face detection detector = face_detector.DetectorCVDNN() @@ -82,22 +82,50 @@ def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_results, opt_gpu): image_records = dataset.find_matches(vec_query, n_results=opt_results) # summary - im_query = draw_utils.draw_bbox(im_query, bbox_norm, stroke_weight=8) + im_query = draw_utils.draw_bbox(im_query, bbox_norm, stroke_weight=4) ims_match = [im_query] + + opt_size = (256,256) + for image_record in image_records: image_record.summarize() log.info(f'{image_record.filepath}') im_match = cv.imread(image_record.filepath) - - im_match_pil = Image.open(image_record.filepath).convert('RGB') - # bbox = - ims_match.append(im_match) - + dim_match = im_match.shape[:2][::-1] + bbox_match = image_record.bbox + score = image_record.score + if score < .5: + clr = (0,255,0) + elif score < .6: + clr = (0,255,125) + elif score < .65: + clr = (0,125,125) + elif score < .7: + clr = (0,125,255) + else: + clr = (0,0,255) + + im_match = draw_utils.draw_bbox(im_match, bbox_match, stroke_weight=4, color=clr ) + bbox_match_dim = bbox_match.to_dim(dim_match) + + im_pil = im_utils.ensure_pil(im_match) + center = (bbox_match_dim.cx, bbox_match_dim.cy) + im_pil = ImageOps.fit(im_pil, opt_size, centering=center) + im_np = im_utils.ensure_np(im_pil) + if image_record.identity is not None: + log.debug(f'identity: {image_record.identity.name_display}') + else: + log.debug('no identity info') + log.debug(f'score: {image_record.score}') + + ims_match.append(im_np) # make montages of most similar faces montages = imutils.build_montages(ims_match, (256, 256), (3,2)) # display for i, montage in enumerate(montages): cv.imshow(f'{opt_dataset.name.upper()}: page {i}', montage) + fp_out = join(Path(opt_fp_in).parent, f'{Path(opt_fp_in).stem}_{i}.png') + cv.imwrite(fp_out, montage) display_utils.handle_keyboard() diff --git a/megapixels/commands/processor/_old_files_to_face_rois.py b/megapixels/commands/processor/_old_files_to_face_rois.py new file mode 100644 index 00000000..d92cbd74 --- /dev/null +++ b/megapixels/commands/processor/_old_files_to_face_rois.py @@ -0,0 +1,168 @@ +""" +Crop images to prepare for training +""" + +import click +# from PIL import Image, ImageOps, ImageFilter, ImageDraw + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg + +color_filters = {'color': 1, 'gray': 2, 'all': 3} + +@click.command() +@click.option('-i', '--input', 'opt_fp_files', required=True, + help='Input file meta CSV') +@click.option('-o', '--output', 'opt_fp_out', required=True, + help='Output CSV') +@click.option('-e', '--ext', 'opt_ext', + default='jpg', type=click.Choice(['jpg', 'png']), + help='File glob ext') +@click.option('--size', 'opt_size', + type=(int, int), default=(300, 300), + help='Output image size') +@click.option('-t', '--detector-type', 'opt_detector_type', + type=cfg.FaceDetectNetVar, + default=click_utils.get_default(types.FaceDetectNet.DLIB_CNN), + help=click_utils.show_help(types.FaceDetectNet)) 
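One caveat on the ImageOps.fit call added above: PIL's `centering` argument expects normalized (0-1, 0-1) values, so a pixel-space center such as `(bbox_match_dim.cx, bbox_match_dim.cy)` should be divided by the image size first. A hedged sketch of the corrected crop:

from PIL import Image, ImageOps

def crop_around_center(im_pil, size, center_px):
    # convert a pixel-space focal point into the normalized centering PIL expects
    w, h = im_pil.size
    centering = (center_px[0] / w, center_px[1] / h)
    return ImageOps.fit(im_pil, size, centering=centering)

thumb = crop_around_center(Image.new('RGB', (640, 480)), (256, 256), (320, 240))
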
+@click.option('-g', '--gpu', 'opt_gpu', default=0, + help='GPU index') +@click.option('--conf', 'opt_conf_thresh', default=0.85, type=click.FloatRange(0,1), + help='Confidence minimum threshold') +@click.option('-p', '--pyramids', 'opt_pyramids', default=0, type=click.IntRange(0,4), + help='Number pyramids to upscale for DLIB detectors') +@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), + help='Slice list of files') +@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False, + help='Display detections to debug') +@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False, + help='Use glob recursion (slower)') +@click.option('-f', '--force', 'opt_force', is_flag=True, + help='Force overwrite file') +@click.option('--color', 'opt_color_filter', + type=click.Choice(color_filters.keys()), default='color', + help='Filter to keep color or grayscale images (color = keep color') +@click.pass_context +def cli(ctx, opt_dirs_in, opt_fp_out, opt_ext, opt_size, opt_detector_type, + opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_recursive, opt_force, opt_color_filter): + """Converts frames with faces to CSV of ROIs""" + + import sys + import os + from os.path import join + from pathlib import Path + from glob import glob + + from tqdm import tqdm + import numpy as np + import dlib # must keep a local reference for dlib + import cv2 as cv + import pandas as pd + + from app.utils import logger_utils, file_utils, im_utils + from app.processors import face_detector + + # ------------------------------------------------- + # init here + + log = logger_utils.Logger.getLogger() + + if not opt_force and Path(opt_fp_out).exists(): + log.error('File exists. Use "-f / --force" to overwite') + return + + if opt_detector_type == types.FaceDetectNet.CVDNN: + detector = face_detector.DetectorCVDNN() + elif opt_detector_type == types.FaceDetectNet.DLIB_CNN: + detector = face_detector.DetectorDLIBCNN(opt_gpu) + elif opt_detector_type == types.FaceDetectNet.DLIB_HOG: + detector = face_detector.DetectorDLIBHOG() + elif opt_detector_type == types.FaceDetectNet.MTCNN: + detector = face_detector.DetectorMTCNN() + elif opt_detector_type == types.FaceDetectNet.HAAR: + log.error('{} not yet implemented'.format(opt_detector_type.name)) + return + + + # ------------------------------------------------- + # process here + color_filter = color_filters[opt_color_filter] + + # get list of files to process + fp_ims = [] + for opt_dir_in in opt_dirs_in: + if opt_recursive: + fp_glob = join(opt_dir_in, '**/*.{}'.format(opt_ext)) + fp_ims += glob(fp_glob, recursive=True) + else: + fp_glob = join(opt_dir_in, '*.{}'.format(opt_ext)) + fp_ims += glob(fp_glob) + log.debug(fp_glob) + + + if opt_slice: + fp_ims = fp_ims[opt_slice[0]:opt_slice[1]] + log.debug('processing {:,} files'.format(len(fp_ims))) + + + data = [] + + for fp_im in tqdm(fp_ims): + im = cv.imread(fp_im) + + # filter out color or grayscale iamges + if color_filter != color_filters['all']: + try: + is_gray = im_utils.is_grayscale(im) + if is_gray and color_filter != color_filters['gray']: + log.debug('Skipping grayscale image: {}'.format(fp_im)) + continue + except Exception as e: + log.error('Could not check grayscale: {}'.format(fp_im)) + continue + + try: + bboxes = detector.detect(im, opt_size=opt_size, opt_pyramids=opt_pyramids) + except Exception as e: + log.error('could not detect: {}'.format(fp_im)) + log.error('{}'.format(e)) + fpp_im = Path(fp_im) + subdir = 
str(fpp_im.parent.relative_to(opt_dir_in)) + + for bbox in bboxes: + # log.debug('is square: {}'.format(bbox.w == bbox.h)) + nw,nh = int(bbox.w * im.shape[1]), int(bbox.h * im.shape[0]) + roi = { + 'fn': fpp_im.stem, + 'ext': fpp_im.suffix.replace('.',''), + 'x': bbox.x, + 'y': bbox.y, + 'w': bbox.w, + 'h': bbox.h, + 'image_height': im.shape[0], + 'image_width': im.shape[1], + 'subdir': subdir} + bbox_dim = bbox.to_dim(im.shape[:2][::-1]) # w,h + data.append(roi) + + # debug display + if opt_display and len(bboxes): + im_md = im_utils.resize(im, width=min(1200, opt_size[0])) + for bbox in bboxes: + bbox_dim = bbox.to_dim(im_md.shape[:2][::-1]) + cv.rectangle(im_md, bbox_dim.pt_tl, bbox_dim.pt_br, (0,255,0), 3) + cv.imshow('', im_md) + while True: + k = cv.waitKey(1) & 0xFF + if k == 27 or k == ord('q'): # ESC + cv.destroyAllWindows() + sys.exit() + elif k != 255: + # any key to continue + break + + # save date + file_utils.mkdirs(opt_fp_out) + df = pd.DataFrame.from_dict(data) + df.to_csv(opt_fp_out, index=False) \ No newline at end of file diff --git a/megapixels/commands/processor/cluster.py b/megapixels/commands/processor/cluster.py new file mode 100644 index 00000000..419091a0 --- /dev/null +++ b/megapixels/commands/processor/cluster.py @@ -0,0 +1,47 @@ +import click + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg +from app.utils.logger_utils import Logger + +@click.command() +@click.option('--data_store', 'opt_data_store', + type=cfg.DataStoreVar, + default=click_utils.get_default(types.DataStore.NAS), + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--dataset', 'opt_dataset', + type=cfg.DatasetVar, + required=True, + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--metadata', 'opt_metadata', required=True, + type=cfg.MetadataVar, + show_default=True, + help=click_utils.show_help(types.Metadata)) +@click.pass_context +def cli(ctx, opt_data_store, opt_dataset, opt_metadata): + """Display image info""" + + # cluster the embeddings + print("[INFO] clustering...") + clt = DBSCAN(metric="euclidean", n_jobs=args["jobs"]) + clt.fit(encodings) + + # determine the total number of unique faces found in the dataset + labelIDs = np.unique(clt.labels_) + numUniqueFaces = len(np.where(labelIDs > -1)[0]) + print("[INFO] # unique faces: {}".format(numUniqueFaces)) + # load and display image + im = cv.imread(fp_im) + cv.imshow('', im) + + while True: + k = cv.waitKey(1) & 0xFF + if k == 27 or k == ord('q'): # ESC + cv.destroyAllWindows() + sys.exit() + elif k != 255: + # any key to continue + break \ No newline at end of file diff --git a/megapixels/commands/processor/crop.py b/megapixels/commands/processor/crop.py new file mode 100644 index 00000000..778be0c4 --- /dev/null +++ b/megapixels/commands/processor/crop.py @@ -0,0 +1,104 @@ +""" +Crop images to prepare for training +""" + +import click +from PIL import Image, ImageOps, ImageFilter, ImageDraw + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg + +@click.command() +@click.option('-i', '--input', 'opt_dir_in', required=True, + help='Input directory') +@click.option('-o', '--output', 'opt_dir_out', required=True, + help='Output directory') +@click.option('-e', '--ext', 'opt_ext', + default='jpg', type=click.Choice(['jpg', 'png']), + help='File glob ext') +@click.option('--size', 'opt_size', + type=(int, int), default=(256, 256), + help='Output image size') 
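The cluster command body above is a stub: DBSCAN, numpy, cv, and `encodings` are referenced without imports or definitions. A self-contained version of the intended clustering step, with fabricated embeddings standing in for face vectors loaded from the dataset:

import numpy as np
from sklearn.cluster import DBSCAN

encodings = np.random.rand(100, 128)  # stand-in for loaded face vectors

clt = DBSCAN(metric='euclidean', n_jobs=-1)
clt.fit(encodings)

label_ids = np.unique(clt.labels_)
num_unique_faces = len(np.where(label_ids > -1)[0])  # label -1 marks noise/outliers
print('# unique faces: {}'.format(num_unique_faces))
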
+@click.option('-t', '--crop-type', 'opt_crop_type', + default='center', type=click.Choice(['center', 'mirror', 'face', 'person', 'none']), + help='Force fit image center location') +@click.pass_context +def cli(ctx, opt_dir_in, opt_dir_out, opt_ext, opt_size, opt_crop_type): + """Crop, mirror images""" + + import os + from os.path import join + from pathlib import Path + from glob import glob + from tqdm import tqdm + + + from app.utils import logger_utils, file_utils, im_utils + + # ------------------------------------------------- + # process here + + log = logger_utils.Logger.getLogger() + log.info('crop images') + + # get list of files to process + fp_ims = glob(join(opt_dir_in, '*.{}'.format(opt_ext))) + log.debug('files: {}'.format(len(fp_ims))) + + # ensure output dir exists + file_utils.mkdirs(opt_dir_out) + + for fp_im in tqdm(fp_ims): + im = process_crop(fp_im, opt_size, opt_crop_type) + fp_out = join(opt_dir_out, Path(fp_im).name) + im.save(fp_out) + + +def process_crop(fp_im, opt_size, crop_type): + im = Image.open(fp_im) + if crop_type == 'center': + im = crop_square_fit(im, opt_size) + elif crop_type == 'mirror': + im = mirror_crop_square(im, opt_size) + return im + +def crop_square_fit(im, size, center=(0.5, 0.5)): + return ImageOps.fit(im, size, method=Image.BICUBIC, centering=center) + +def mirror_crop_square(im, size): + # force to even dims + if im.size[0] % 2 or im.size[1] % 2: + im = ImageOps.fit(im, ((im.size[0] // 2) * 2, (im.size[1] // 2) * 2)) + + # create new square image + min_size, max_size = (min(im.size), max(im.size)) + orig_w, orig_h = im.size + margin = (max_size - min_size) // 2 + w, h = (max_size, max_size) + im_new = Image.new('RGB', (w, h), color=(0, 0, 0)) + + #crop (l, t, r, b) + if orig_w > orig_h: + # landscape, mirror expand T/B + im_top = ImageOps.mirror(im.crop((0, 0, margin, w))) + im_bot = ImageOps.mirror(im.crop((orig_h - margin, 0, orig_h, w))) + im_new.paste(im_top, (0, 0)) + im_new.paste(im, (margin, 0, orig_h + margin, w)) + im_new.paste(im_bot, (h - margin, 0)) + elif orig_h > orig_w: + # portrait, mirror expand L/R + im_left = ImageOps.mirror(im.crop((0, 0, margin, h))) + im_right = ImageOps.mirror(im.crop((orig_w - margin, 0, orig_w, h))) + im_new.paste(im_left, (0, 0)) + im_new.paste(im, (margin, 0, orig_w + margin, h)) + im_new.paste(im_right, (w - margin, 0)) + + return im_new.resize(size) + + +def center_crop_face(): + pass + +def center_crop_person(): + pass \ No newline at end of file diff --git a/megapixels/commands/processor/csv_to_faces.py b/megapixels/commands/processor/csv_to_faces.py new file mode 100644 index 00000000..64c8b965 --- /dev/null +++ b/megapixels/commands/processor/csv_to_faces.py @@ -0,0 +1,105 @@ +""" +Reads in CSV of ROIs and extracts facial regions with padding +""" + +import click + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', required=True, + help='Input CSV') +@click.option('-m', '--media', 'opt_dir_media', required=True, + help='Input image/video directory') +@click.option('-o', '--output', 'opt_dir_out', required=True, + help='Output directory for extracted ROI images') +@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), + help='Slice list of files') +@click.option('--padding', 'opt_padding', default=0.25, + help='Facial padding as percentage of face width') +@click.option('--ext', 'opt_ext_out', default='png', type=click.Choice(['jpg', 'png']), + 
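mirror_crop_square below builds its square canvas by pasting mirrored strips manually with PIL; numpy's reflect padding achieves the same effect more compactly. An alternative sketch (assumes an RGB input):

import numpy as np
from PIL import Image

def mirror_pad_square(im):
    # pad the short axis with mirrored content until the image is square
    a = np.asarray(im)
    h, w = a.shape[:2]
    d = abs(h - w)
    pad = ((d // 2, d - d // 2), (0, 0), (0, 0)) if w > h else ((0, 0), (d // 2, d - d // 2), (0, 0))
    return Image.fromarray(np.pad(a, pad, mode='reflect'))

im_sq = mirror_pad_square(Image.new('RGB', (300, 200), (128, 64, 32)))
assert im_sq.size[0] == im_sq.size[1]
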
help='Output image type') +@click.option('--min', 'opt_min', default=(60, 60), + help='Minimum original face size') +@click.pass_context +def cli(ctx, opt_fp_in, opt_dir_media, opt_dir_out, opt_slice, + opt_padding, opt_ext_out, opt_min): + """Converts ROIs to images""" + + import os + from os.path import join + from pathlib import Path + from glob import glob + + from tqdm import tqdm + import numpy as np + from PIL import Image, ImageOps, ImageFilter, ImageDraw + import cv2 as cv + import pandas as pd + + from app.utils import logger_utils, file_utils, im_utils + from app.models.bbox import BBox + + # ------------------------------------------------- + # process here + log = logger_utils.Logger.getLogger() + + df_rois = pd.read_csv(opt_fp_in, dtype={'subdir': str, 'fn': str}) + if opt_slice: + df_rois = df_rois[opt_slice[0]:opt_slice[1]] + + log.info('Processing {:,} rows'.format(len(df_rois))) + + file_utils.mkdirs(opt_dir_out) + + df_rois_grouped = df_rois.groupby(['fn']) # group by fn/filename + groups = df_rois_grouped.groups + skipped = [] + + for group in tqdm(groups): + # get image + group_rows = df_rois_grouped.get_group(group) + + row = group_rows.iloc[0] + fp_im = join(opt_dir_media, str(row['subdir']), '{fn}.{ext}'.format(**row)) # TODO change to ext + try: + im = Image.open(fp_im).convert('RGB') + im.verify() + except Exception as e: + log.warn('Could not open: {}'.format(fp_im)) + log.error(e) + continue + + for idx, roi in group_rows.iterrows(): + # get bbox to im dimensions + xywh = [roi['x'], roi['y'], roi['w'] , roi['h']] + bbox = BBox.from_xywh(*xywh) + dim = im.size + bbox_dim = bbox.to_dim(dim) + # expand + opt_padding_px = int(opt_padding * bbox_dim.width) + bbox_dim_exp = bbox_dim.expand_dim(opt_padding_px, dim) + # crop + x1y2 = bbox_dim_exp.pt_tl + bbox_dim_exp.pt_br + im_crop = im.crop(box=x1y2) + + # strip exif, create new image and paste data + im_crop_data = list(im_crop.getdata()) + im_crop_no_exif = Image.new(im_crop.mode, im_crop.size) + im_crop_no_exif.putdata(im_crop_data) + + # save + idx_zpad = file_utils.zpad(idx, zeros=3) + subdir = '' if roi['subdir'] == '.' 
else '{}_'.format(roi['subdir']) + subdir = subdir.replace('/', '_') + fp_im_out = join(opt_dir_out, '{}{}{}.{}'.format(subdir, roi['fn'], idx_zpad, opt_ext_out)) + # threshold size and save + if im_crop_no_exif.size[0] < opt_min[0] or im_crop_no_exif.size[1] < opt_min[1]: + skipped.append(fp_im_out) + log.info('Face too small: {}, idx: {}'.format(fp_im, idx)) + else: + im_crop_no_exif.save(fp_im_out) + + log.info('Skipped {:,} images'.format(len(skipped))) diff --git a/megapixels/commands/processor/csv_to_faces_mt.py b/megapixels/commands/processor/csv_to_faces_mt.py new file mode 100644 index 00000000..64c8b965 --- /dev/null +++ b/megapixels/commands/processor/csv_to_faces_mt.py @@ -0,0 +1,105 @@ +""" +Reads in CSV of ROIs and extracts facial regions with padding +""" + +import click + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', required=True, + help='Input CSV') +@click.option('-m', '--media', 'opt_dir_media', required=True, + help='Input image/video directory') +@click.option('-o', '--output', 'opt_dir_out', required=True, + help='Output directory for extracted ROI images') +@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), + help='Slice list of files') +@click.option('--padding', 'opt_padding', default=0.25, + help='Facial padding as percentage of face width') +@click.option('--ext', 'opt_ext_out', default='png', type=click.Choice(['jpg', 'png']), + help='Output image type') +@click.option('--min', 'opt_min', default=(60, 60), + help='Minimum original face size') +@click.pass_context +def cli(ctx, opt_fp_in, opt_dir_media, opt_dir_out, opt_slice, + opt_padding, opt_ext_out, opt_min): + """Converts ROIs to images""" + + import os + from os.path import join + from pathlib import Path + from glob import glob + + from tqdm import tqdm + import numpy as np + from PIL import Image, ImageOps, ImageFilter, ImageDraw + import cv2 as cv + import pandas as pd + + from app.utils import logger_utils, file_utils, im_utils + from app.models.bbox import BBox + + # ------------------------------------------------- + # process here + log = logger_utils.Logger.getLogger() + + df_rois = pd.read_csv(opt_fp_in, dtype={'subdir': str, 'fn': str}) + if opt_slice: + df_rois = df_rois[opt_slice[0]:opt_slice[1]] + + log.info('Processing {:,} rows'.format(len(df_rois))) + + file_utils.mkdirs(opt_dir_out) + + df_rois_grouped = df_rois.groupby(['fn']) # group by fn/filename + groups = df_rois_grouped.groups + skipped = [] + + for group in tqdm(groups): + # get image + group_rows = df_rois_grouped.get_group(group) + + row = group_rows.iloc[0] + fp_im = join(opt_dir_media, str(row['subdir']), '{fn}.{ext}'.format(**row)) # TODO change to ext + try: + im = Image.open(fp_im).convert('RGB') + im.verify() + except Exception as e: + log.warn('Could not open: {}'.format(fp_im)) + log.error(e) + continue + + for idx, roi in group_rows.iterrows(): + # get bbox to im dimensions + xywh = [roi['x'], roi['y'], roi['w'] , roi['h']] + bbox = BBox.from_xywh(*xywh) + dim = im.size + bbox_dim = bbox.to_dim(dim) + # expand + opt_padding_px = int(opt_padding * bbox_dim.width) + bbox_dim_exp = bbox_dim.expand_dim(opt_padding_px, dim) + # crop + x1y2 = bbox_dim_exp.pt_tl + bbox_dim_exp.pt_br + im_crop = im.crop(box=x1y2) + + # strip exif, create new image and paste data + im_crop_data = list(im_crop.getdata()) + im_crop_no_exif = Image.new(im_crop.mode, im_crop.size) + 
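The getdata/putdata round-trip around this point rebuilds each crop from raw pixel data so the saved face images carry no EXIF metadata from the source photos. A minimal standalone sketch of that same strip-and-save pattern (the filenames here are hypothetical):

from PIL import Image

def strip_exif(fp_in, fp_out):
  # rebuilding from raw pixels drops EXIF and any other metadata blocks
  im = Image.open(fp_in)
  im_clean = Image.new(im.mode, im.size)
  im_clean.putdata(list(im.getdata()))
  im_clean.save(fp_out)

strip_exif('face_src.jpg', 'face_clean.png')  # hypothetical paths

getdata()/putdata() is simple but slow on large images; a numpy round-trip would be faster, though this matches the approach used in these commands.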
im_crop_no_exif.putdata(im_crop_data) + + # save + idx_zpad = file_utils.zpad(idx, zeros=3) + subdir = '' if roi['subdir'] == '.' else '{}_'.format(roi['subdir']) + subdir = subdir.replace('/', '_') + fp_im_out = join(opt_dir_out, '{}{}{}.{}'.format(subdir, roi['fn'], idx_zpad, opt_ext_out)) + # threshold size and save + if im_crop_no_exif.size[0] < opt_min[0] or im_crop_no_exif.size[1] < opt_min[1]: + skipped.append(fp_im_out) + log.info('Face too small: {}, idx: {}'.format(fp_im, idx)) + else: + im_crop_no_exif.save(fp_im_out) + + log.info('Skipped {:,} images'.format(len(skipped))) diff --git a/megapixels/commands/processor/face_3ddfa.py b/megapixels/commands/processor/face_3ddfa.py new file mode 100644 index 00000000..ffc74180 --- /dev/null +++ b/megapixels/commands/processor/face_3ddfa.py @@ -0,0 +1,331 @@ +import click + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg + + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', default=None, required=True, + help='Image filepath') +@click.option('-o', '--output', 'opt_fp_out', default=None, + help='GIF output path') +@click.option('--size', 'opt_size', + type=(int, int), default=(300, 300), + help='Output image size') +@click.option('-g', '--gpu', 'opt_gpu', default=0, + help='GPU index') +@click.option('-f', '--force', 'opt_force', is_flag=True, + help='Force overwrite file') +@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False, + help='Display detections to debug') +@click.pass_context +def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display): + """Face detector demo""" + + import sys + import os + from os.path import join + from pathlib import Path + import time + + from tqdm import tqdm + import numpy as np + import pandas as pd + import cv2 as cv + import dlib + + from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils + from app.utils import plot_utils + from app.processors import face_detector, face_age + from app.models.data_store import DataStore + + # 3DDFA + # git clone https://github.com/cleardusk/3DDFA/ 3rdparty/ + + import torch + import torchvision.transforms as transforms + import mobilenet_v1 + from utils.ddfa import ToTensorGjz, NormalizeGjz, str2bool + import scipy.io as sio + from utils.inference import get_suffix, parse_roi_box_from_landmark, crop_img, predict_68pts, dump_to_ply, dump_vertex, \ + draw_landmarks, predict_dense, parse_roi_box_from_bbox, get_colors, write_obj_with_colors + from utils.cv_plot import plot_pose_box + from utils.estimate_pose import parse_pose + from utils.render import get_depths_image, cget_depths_image, cpncc + from utils.paf import gen_img_paf + import argparse + import torch.backends.cudnn as cudnn + + + log = logger_utils.Logger.getLogger() + + + # ------------------------------------------------- + # load image + + im = cv.imread(opt_fp_in) + im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) + + # ---------------------------------------------------------------------------- + # detect face + + face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU + bboxes = face_detector.detect(im_resized, largest=True) + bbox = bboxes[0] + dim = im_resized.shape[:2][::-1] + bbox_dim = bbox.to_dim(dim) + if not bbox: + log.error('no face detected') + return + else: + log.info(f'face detected: {bbox_dim.to_xyxy()}') + + + # ---------------------------------------------------------------------------- + # age + + 
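The timing lines that follow bracket each estimator call with time.time(), which already returns seconds, so the elapsed value is simply the difference of two calls. A small helper in the same spirit, assuming any callable estimator (a sketch, not part of app.utils):

import time
from contextlib import contextmanager

@contextmanager
def timer(label, log_fn=print):
  # time.time() returns seconds; the elapsed value needs no extra scaling
  st = time.time()
  yield
  log_fn(f'{label} took: {time.time() - st:.5f}s')

# usage sketch with the estimators created below:
# with timer('age apparent', log.info):
#   age_apparent = age_apparent_predictor.age(im_resized, bbox_dim)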
age_apparent_predictor = face_age.FaceAgeApparent() + age_real_predictor = face_age.FaceAgeReal() + + st = time.time() + age_real = age_real_predictor.age(im_resized, bbox_dim) + log.info(f'age real took: {(time.time()-st)/1000:.5f}s') + st = time.time() + age_apparent = age_apparent_predictor.age(im_resized, bbox_dim) + log.info(f'age apparent took: {(time.time()-st)/1000:.5f}s') + + + # ---------------------------------------------------------------------------- + # output + + log.info(f'Face coords: {bbox_dim} face') + log.info(f'Age (real): {(age_real):.2f}') + log.info(f'Age (apparent): {(age_apparent):.2f}') + + + # ---------------------------------------------------------------------------- + # draw + + # draw real age + im_age_real = im_resized.copy() + draw_utils.draw_bbox(im_age_real, bbox_dim) + txt = f'{(age_real):.2f}' + draw_utils.draw_text(im_age_real, bbox_dim.pt_tl, txt) + + # apparent + im_age_apparent = im_resized.copy() + draw_utils.draw_bbox(im_age_apparent, bbox_dim) + txt = f'{(age_apparent):.2f}' + draw_utils.draw_text(im_age_apparent, bbox_dim.pt_tl, txt) + + + # ---------------------------------------------------------------------------- + # save + + if opt_fp_out: + # save pose only + fpp_out = Path(opt_fp_out) + + fp_out = join(fpp_out.parent, f'{fpp_out.stem}_real{fpp_out.suffix}') + cv.imwrite(fp_out, im_age_real) + + fp_out = join(fpp_out.parent, f'{fpp_out.stem}_apparent{fpp_out.suffix}') + cv.imwrite(fp_out, im_age_apparent) + + + # ---------------------------------------------------------------------------- + # display + + if opt_display: + # show all images here + cv.imshow('real', im_age_real) + cv.imshow('apparent', im_age_apparent) + display_utils.handle_keyboard() + + + + + +STD_SIZE = 120 + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='3DDFA inference pipeline') + parser.add_argument('-f', '--files', nargs='+', + help='image files paths fed into network, single or multiple images') + parser.add_argument('-m', '--mode', default='cpu', type=str, help='gpu or cpu mode') + parser.add_argument('--show_flg', default='true', type=str2bool, help='whether show the visualization result') + parser.add_argument('--bbox_init', default='one', type=str, + help='one|two: one-step bbox initialization or two-step') + parser.add_argument('--dump_res', default='true', type=str2bool, help='whether write out the visualization image') + parser.add_argument('--dump_vertex', default='true', type=str2bool, + help='whether write out the dense face vertices to mat') + parser.add_argument('--dump_ply', default='true', type=str2bool) + parser.add_argument('--dump_pts', default='true', type=str2bool) + parser.add_argument('--dump_roi_box', default='true', type=str2bool) + parser.add_argument('--dump_pose', default='true', type=str2bool) + parser.add_argument('--dump_depth', default='true', type=str2bool) + parser.add_argument('--dump_pncc', default='true', type=str2bool) + parser.add_argument('--dump_paf', default='true', type=str2bool) + parser.add_argument('--paf_size', default=3, type=int, help='PAF feature kernel size') + parser.add_argument('--dump_obj', default='true', type=str2bool) + parser.add_argument('--dlib_bbox', default='true', type=str2bool, help='whether use dlib to predict bbox') + parser.add_argument('--dlib_landmark', default='true', type=str2bool, + help='whether use dlib landmark to crop image') + + args = parser.parse_args() + main(args) + + + +def main(args): + # 1. 
load pre-tained model + checkpoint_fp = 'models/phase1_wpdc_vdc_v2.pth.tar' + arch = 'mobilenet_1' + + checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict'] + model = getattr(mobilenet_v1, arch)(num_classes=62) # 62 = 12(pose) + 40(shape) +10(expression) + model_dict = model.state_dict() + # because the model is trained by multiple gpus, prefix module should be removed + for k in checkpoint.keys(): + model_dict[k.replace('module.', '')] = checkpoint[k] + model.load_state_dict(model_dict, strict=False) + if args.mode == 'gpu': + cudnn.benchmark = True + model = model.cuda() + model.eval() + + # 2. load dlib model for face detection and landmark used for face cropping + if args.dlib_landmark: + dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat' + face_regressor = dlib.shape_predictor(dlib_landmark_model) + if args.dlib_bbox: + face_detector = dlib.get_frontal_face_detector() + + # 3. forward + tri = sio.loadmat('visualize/tri.mat')['tri'] + transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)]) + for img_fp in args.files: + img_ori = cv2.imread(img_fp) + if args.dlib_bbox: + rects = face_detector(img_ori, 1) + else: + rects = [] + + if len(rects) == 0: + rects = dlib.rectangles() + rect_fp = img_fp + '.bbox' + lines = open(rect_fp).read().strip().split('\n')[1:] + for l in lines: + l, r, t, b = [int(_) for _ in l.split(' ')[1:]] + rect = dlib.rectangle(l, r, t, b) + rects.append(rect) + + pts_res = [] + Ps = [] # Camera matrix collection + poses = [] # pose collection, [todo: validate it] + vertices_lst = [] # store multiple face vertices + ind = 0 + suffix = get_suffix(img_fp) + for rect in rects: + # whether use dlib landmark to crop image, if not, use only face bbox to calc roi bbox for cropping + if args.dlib_landmark: + # - use landmark for cropping + pts = face_regressor(img_ori, rect).parts() + pts = np.array([[pt.x, pt.y] for pt in pts]).T + roi_box = parse_roi_box_from_landmark(pts) + else: + # - use detected face bbox + bbox = [rect.left(), rect.top(), rect.right(), rect.bottom()] + roi_box = parse_roi_box_from_bbox(bbox) + + img = crop_img(img_ori, roi_box) + + # forward: one step + img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR) + input = transform(img).unsqueeze(0) + with torch.no_grad(): + if args.mode == 'gpu': + input = input.cuda() + param = model(input) + param = param.squeeze().cpu().numpy().flatten().astype(np.float32) + + # 68 pts + pts68 = predict_68pts(param, roi_box) + + # two-step for more accurate bbox to crop face + if args.bbox_init == 'two': + roi_box = parse_roi_box_from_landmark(pts68) + img_step2 = crop_img(img_ori, roi_box) + img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR) + input = transform(img_step2).unsqueeze(0) + with torch.no_grad(): + if args.mode == 'gpu': + input = input.cuda() + param = model(input) + param = param.squeeze().cpu().numpy().flatten().astype(np.float32) + + pts68 = predict_68pts(param, roi_box) + + pts_res.append(pts68) + P, pose = parse_pose(param) + Ps.append(P) + poses.append(pose) + + # dense face 3d vertices + if args.dump_ply or args.dump_vertex or args.dump_depth or args.dump_pncc or args.dump_obj: + vertices = predict_dense(param, roi_box) + vertices_lst.append(vertices) + if args.dump_ply: + dump_to_ply(vertices, tri, '{}_{}.ply'.format(img_fp.replace(suffix, ''), ind)) + if args.dump_vertex: + dump_vertex(vertices, '{}_{}.mat'.format(img_fp.replace(suffix, ''), 
ind)) + + # save .mat for 3d Face + wfp = '{}_{}_face3d.mat'.format(img_fp.replace(suffix, ''), ind) + colors = get_colors(img_ori, vertices) + sio.savemat(wfp, {'vertices': vertices, 'colors': colors, 'triangles': tri}) + + if args.dump_pts: + wfp = '{}_{}.txt'.format(img_fp.replace(suffix, ''), ind) + np.savetxt(wfp, pts68, fmt='%.3f') + print('Save 68 3d landmarks to {}'.format(wfp)) + if args.dump_roi_box: + wfp = '{}_{}.roibox'.format(img_fp.replace(suffix, ''), ind) + np.savetxt(wfp, roi_box, fmt='%.3f') + print('Save roi box to {}'.format(wfp)) + if args.dump_paf: + wfp_paf = '{}_{}_paf.jpg'.format(img_fp.replace(suffix, ''), ind) + wfp_crop = '{}_{}_crop.jpg'.format(img_fp.replace(suffix, ''), ind) + paf_feature = gen_img_paf(img_crop=img, param=param, kernel_size=args.paf_size) + + cv2.imwrite(wfp_paf, paf_feature) + cv2.imwrite(wfp_crop, img) + print('Dump to {} and {}'.format(wfp_crop, wfp_paf)) + if args.dump_obj: + wfp = '{}_{}.obj'.format(img_fp.replace(suffix, ''), ind) + colors = get_colors(img_ori, vertices) + write_obj_with_colors(wfp, vertices, tri, colors) + print('Dump obj with sampled texture to {}'.format(wfp)) + ind += 1 + + if args.dump_pose: + # P, pose = parse_pose(param) # Camera matrix (without scale), and pose (yaw, pitch, roll, to verify) + img_pose = plot_pose_box(img_ori, Ps, pts_res) + wfp = img_fp.replace(suffix, '_pose.jpg') + cv2.imwrite(wfp, img_pose) + print('Dump to {}'.format(wfp)) + if args.dump_depth: + wfp = img_fp.replace(suffix, '_depth.png') + # depths_img = get_depths_image(img_ori, vertices_lst, tri-1) # python version + depths_img = cget_depths_image(img_ori, vertices_lst, tri - 1) # cython version + cv2.imwrite(wfp, depths_img) + print('Dump to {}'.format(wfp)) + if args.dump_pncc: + wfp = img_fp.replace(suffix, '_pncc.png') + pncc_feature = cpncc(img_ori, vertices_lst, tri - 1) # cython version + cv2.imwrite(wfp, pncc_feature[:, :, ::-1]) # cv2.imwrite will swap RGB -> BGR + print('Dump to {}'.format(wfp)) + if args.dump_res: + draw_landmarks(img_ori, pts_res, wfp=img_fp.replace(suffix, '_3DDFA.jpg'), show_flg=args.show_flg) diff --git a/megapixels/commands/processor/face_attributes.py b/megapixels/commands/processor/face_attributes.py new file mode 100644 index 00000000..01fe3bd1 --- /dev/null +++ b/megapixels/commands/processor/face_attributes.py @@ -0,0 +1,136 @@ +""" + +""" + +import click + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg + + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', default=None, + help='Override enum input filename CSV') +@click.option('-o', '--output', 'opt_fp_out', default=None, + help='Override enum output filename CSV') +@click.option('-m', '--media', 'opt_dir_media', default=None, + help='Override enum media directory') +@click.option('--store', 'opt_data_store', + type=cfg.DataStoreVar, + default=click_utils.get_default(types.DataStore.HDD), + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--dataset', 'opt_dataset', + type=cfg.DatasetVar, + required=True, + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--size', 'opt_size', + type=(int, int), default=cfg.DEFAULT_SIZE_FACE_DETECT, + help='Processing size for detection') +@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), + help='Slice list of files') +@click.option('-f', '--force', 'opt_force', is_flag=True, + help='Force overwrite file') +@click.option('-d', '--display', 'opt_display', 
is_flag=True,
+  help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
+    opt_size, opt_slice, opt_force, opt_display):
+  """Estimates face attributes: age (real and apparent) and gender"""
+
+  import sys
+  import os
+  from os.path import join
+  from pathlib import Path
+  from glob import glob
+
+  from tqdm import tqdm
+  import numpy as np
+  import cv2 as cv
+  import pandas as pd
+
+  from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+  from app.processors import face_age_gender
+  from app.models.data_store import DataStore
+  from app.models.bbox import BBox
+
+  # -------------------------------------------------------------------------
+  # init here
+
+  log = logger_utils.Logger.getLogger()
+  # init face processors
+  age_estimator_apnt = face_age_gender.FaceAgeApparent()
+  age_estimator_real = face_age_gender.FaceAgeReal()
+  gender_estimator = face_age_gender.FaceGender()
+
+  # init filepaths
+  data_store = DataStore(opt_data_store, opt_dataset)
+  # set file output path
+  metadata_type = types.Metadata.FACE_ATTRIBUTES
+  fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out
+  if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+    return
+
+  # -------------------------------------------------------------------------
+  # load filepath data
+  fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+  df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
+  # load ROI data
+  fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+  df_roi = pd.read_csv(fp_roi).set_index('index')
+  # slice if you want
+  if opt_slice:
+    df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+  # group by image index (speedup if multiple faces per image)
+  df_img_groups = df_roi.groupby('record_index')
+  log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+  # store attributes in list
+  results = []
+
+  # -------------------------------------------------------------------------
+  # iterate groups with file/record index as key
+
+  for record_index, df_img_group in tqdm(df_img_groups):
+
+    # access file_record DataSeries
+    file_record = df_record.iloc[record_index]
+
+    # load image
+    fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext)
+    im = cv.imread(fp_im)
+    im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+    dim = im_resized.shape[:2][::-1]
+
+    # iterate ROIs in this image
+    for roi_index, df_img in df_img_group.iterrows():
+
+      # build bbox and run the age/gender estimators
+      bbox_norm = BBox.from_xywh(df_img.x, df_img.y, df_img.w, df_img.h)
+      bbox_dim = bbox_norm.to_dim(dim)
+
+      age_apnt = age_estimator_apnt.predict(im_resized, bbox_norm)
+      age_real = age_estimator_real.predict(im_resized, bbox_norm)
+      gender = gender_estimator.predict(im_resized, bbox_norm)
+
+      attr_obj = {
+        'age_real': float(f'{age_real:.2f}'),
+        'age_apparent': float(f'{age_apnt:.2f}'),
+        'm': float(f'{gender["m"]:.4f}'),
+        'f': float(f'{gender["f"]:.4f}'),
+        'roi_index': roi_index
+      }
+      results.append(attr_obj)
+
+
+  # create DataFrame and save to CSV
+  file_utils.mkdirs(fp_out)
+  df = pd.DataFrame.from_dict(results)
+  df.index.name = 'index'
+  df.to_csv(fp_out)
+
+  # save script
+  file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out))
\ No newline at end of file
diff --git a/megapixels/commands/processor/face_frames.py b/megapixels/commands/processor/face_frames.py
new file mode 100644
index 00000000..76f23af1
--- /dev/null
+++
b/megapixels/commands/processor/face_frames.py @@ -0,0 +1,82 @@ +from glob import glob +import os +from os.path import join +from pathlib import Path + +import click + + + + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', required=True, + help='Input directory to glob') +@click.option('-o', '--output', 'opt_fp_out', required=True, + help='Output directory for face frames') +@click.option('--size', 'opt_size', + type=(int, int), default=(300, 300), + help='Output image size') +@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), + help='Slice list of files') +@click.pass_context +def cli(ctx, opt_fp_in, opt_fp_out, opt_size, opt_slice): + """Split video to face frames""" + + from tqdm import tqdm + import dlib + import pandas as pd + from PIL import Image, ImageOps, ImageFilter + import cv2 as cv + import numpy as np + + from app.processors import face_detector + from app.utils import logger_utils, file_utils, im_utils + from app.settings import types + from app.utils import click_utils + from app.settings import app_cfg as cfg + from app.models.bbox import BBox + + log = logger_utils.Logger.getLogger() + + # ------------------------------------------------- + # process + + detector = face_detector.DetectorDLIBCNN() + + # get file list + fp_videos = glob(join(opt_fp_in, '*.mp4')) + fp_videos += glob(join(opt_fp_in, '*.webm')) + fp_videos += glob(join(opt_fp_in, '*.mkv')) + + min_distance_per = .025 # minimum distance percentage to save new face image + face_interval = 5 + frame_interval_count = 0 + frame_count = 0 + bbox_prev = BBox(0,0,0,0) + file_utils.mkdirs(opt_fp_out) + dnn_size = opt_size + max_dim = max(dnn_size) + px_thresh = int(max_dim * min_distance_per) + + for fp_video in tqdm(fp_videos): + # load video + video = cv.VideoCapture(fp_video) + # iterate through frames + while video.isOpened(): + res, frame = video.read() + if not res: + break + # increment frames, save frame if interval has passed + frame_count += 1 # for naming + frame_interval_count += 1 # for interval + bboxes = detector.detect(frame, opt_size=dnn_size, opt_pyramids=0) + if len(bboxes) > 0 and frame_interval_count >= face_interval: + dim = frame.shape[:2][::-1] + d = bboxes[0].to_dim(dim).distance(bbox_prev) + if d > px_thresh: + # save frame + zfc = file_utils.zpad(frame_count) + fp_frame = join(opt_fp_out, '{}_{}.jpg'.format(Path(fp_video).stem, zfc)) + cv.imwrite(fp_frame, frame) + frame_interval_count = 0 + bbox_prev = bboxes[0] diff --git a/megapixels/commands/processor/face_landmark_2d_5.py b/megapixels/commands/processor/face_landmark_2d_5.py new file mode 100644 index 00000000..40ec6f41 --- /dev/null +++ b/megapixels/commands/processor/face_landmark_2d_5.py @@ -0,0 +1,146 @@ +""" + +""" + +import click + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg + +color_filters = {'color': 1, 'gray': 2, 'all': 3} + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', default=None, + help='Override enum input filename CSV') +@click.option('-o', '--output', 'opt_fp_out', default=None, + help='Override enum output filename CSV') +@click.option('-m', '--media', 'opt_dir_media', default=None, + help='Override enum media directory') +@click.option('--store', 'opt_data_store', + type=cfg.DataStoreVar, + default=click_utils.get_default(types.DataStore.HDD), + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--dataset', 'opt_dataset', + type=cfg.DatasetVar, + required=True, + 
show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('-d', '--detector', 'opt_detector_type', + type=cfg.FaceLandmark2D_5Var, + default=click_utils.get_default(types.FaceLandmark2D_5.DLIB), + help=click_utils.show_help(types.FaceLandmark2D_5)) +@click.option('--size', 'opt_size', + type=(int, int), default=(300, 300), + help='Output image size') +@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), + help='Slice list of files') +@click.option('-f', '--force', 'opt_force', is_flag=True, + help='Force overwrite file') +@click.option('-d', '--display', 'opt_display', is_flag=True, + help='Display image for debugging') +@click.pass_context +def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type, + opt_size, opt_slice, opt_force, opt_display): + """Creates 2D 5-point landmarks""" + + import sys + import os + from os.path import join + from pathlib import Path + from glob import glob + + from tqdm import tqdm + import numpy as np + import cv2 as cv + import pandas as pd + + from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils + from app.processors import face_landmarks + from app.models.data_store import DataStore + from app.models.bbox import BBox + + # ------------------------------------------------- + # init here + + log = logger_utils.Logger.getLogger() + # init filepaths + data_store = DataStore(opt_data_store, opt_dataset) + # set file output path + metadata_type = types.Metadata.FACE_LANDMARK_2D_5 + fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out + if not opt_force and Path(fp_out).exists(): + log.error('File exists. Use "-f / --force" to overwite') + return + + # init face landmark processors + if opt_detector_type == types.FaceLandmark2D_5.DLIB: + # use dlib 68 point detector + landmark_detector = face_landmarks.Dlib2D_5() + elif opt_detector_type == types.FaceLandmark2D_5.MTCNN: + # use dlib 5 point detector + landmark_detector = face_landmarks.MTCNN2D_5() + else: + log.error('{} not yet implemented'.format(opt_detector_type.name)) + return + + log.info(f'Using landmark detector: {opt_detector_type.name}') + + # load filepath data + fp_record = data_store.metadata(types.Metadata.FILE_RECORD) + df_record = pd.read_csv(fp_record).set_index('index') + # load ROI data + fp_roi = data_store.metadata(types.Metadata.FACE_ROI) + df_roi = pd.read_csv(fp_roi).set_index('index') + # slice if you want + if opt_slice: + df_roi = df_roi[opt_slice[0]:opt_slice[1]] + # group by image index (speedup if multiple faces per image) + df_img_groups = df_roi.groupby('record_index') + log.debug('processing {:,} groups'.format(len(df_img_groups))) + + # store landmarks in list + results = [] + + # iterate groups with file/record index as key + for record_index, df_img_group in tqdm(df_img_groups): + + # acces file record + ds_record = df_record.iloc[record_index] + + # load image + fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext) + im = cv.imread(fp_im) + im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) + + # iterate image group dataframe with roi index as key + for roi_index, df_img in df_img_group.iterrows(): + + # get bbox + x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h + dim = im_resized.shape[:2][::-1] + bbox = BBox.from_xywh(x, y, w, h).to_dim(dim) + + # get landmark points + points = landmark_detector.landmarks(im_resized, bbox) + points_norm = landmark_detector.normalize(points, dim) + 
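Each landmark set is normalized by the resized image dimensions and then flattened so that one face becomes one CSV row. A rough standalone equivalent of that normalize/flatten step (plain numpy; these helper names are illustrative, not the app.processors API):

import numpy as np

def normalize_points(points, dim):
  # scale (x, y) pixel coords into 0..1 by image (width, height)
  return np.asarray(points, dtype=np.float64) / np.asarray(dim, dtype=np.float64)

def flatten_points(points_norm):
  # interleave to [x0, y0, x1, y1, ...] for one flat CSV row
  return np.asarray(points_norm).reshape(-1).tolist()

pts = [(120, 88), (180, 90), (150, 130), (128, 170), (172, 172)]  # a 5-point example
row = flatten_points(normalize_points(pts, (300, 300)))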
points_flat = landmark_detector.flatten(points_norm) + + # display to screen if optioned + if opt_display: + draw_utils.draw_landmarks2D(im_resized, points) + draw_utils.draw_bbox(im_resized, bbox) + cv.imshow('', im_resized) + display_utils.handle_keyboard() + + results.append(points_flat) + + # create DataFrame and save to CSV + file_utils.mkdirs(fp_out) + df = pd.DataFrame.from_dict(results) + df.index.name = 'index' + df.to_csv(fp_out) + + # save script + file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file diff --git a/megapixels/commands/processor/face_landmark_2d_68.py b/megapixels/commands/processor/face_landmark_2d_68.py new file mode 100644 index 00000000..c6978a40 --- /dev/null +++ b/megapixels/commands/processor/face_landmark_2d_68.py @@ -0,0 +1,150 @@ +""" + +""" + +import click + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg + + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', default=None, + help='Override enum input filename CSV') +@click.option('-o', '--output', 'opt_fp_out', default=None, + help='Override enum output filename CSV') +@click.option('-m', '--media', 'opt_dir_media', default=None, + help='Override enum media directory') +@click.option('--store', 'opt_data_store', + type=cfg.DataStoreVar, + default=click_utils.get_default(types.DataStore.HDD), + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--dataset', 'opt_dataset', + type=cfg.DatasetVar, + required=True, + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('-d', '--detector', 'opt_detector_type', + type=cfg.FaceLandmark2D_68Var, + default=click_utils.get_default(types.FaceLandmark2D_68.DLIB), + help=click_utils.show_help(types.FaceLandmark2D_68)) +@click.option('--size', 'opt_size', + type=(int, int), default=(300, 300), + help='Output image size') +@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), + help='Slice list of files') +@click.option('-f', '--force', 'opt_force', is_flag=True, + help='Force overwrite file') +@click.option('-d', '--display', 'opt_display', is_flag=True, + help='Display image for debugging') +@click.pass_context +def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type, + opt_size, opt_slice, opt_force, opt_display): + """Creates 2D 68-point landmarks""" + + import sys + import os + from os.path import join + from pathlib import Path + from glob import glob + + from tqdm import tqdm + import numpy as np + import cv2 as cv + import pandas as pd + + from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils + from app.processors import face_landmarks + from app.models.data_store import DataStore + from app.models.bbox import BBox + + # ------------------------------------------------------------------------- + # init here + + log = logger_utils.Logger.getLogger() + # init filepaths + data_store = DataStore(opt_data_store, opt_dataset) + # set file output path + metadata_type = types.Metadata.FACE_LANDMARK_2D_68 + fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out + if not opt_force and Path(fp_out).exists(): + log.error('File exists. 
Use "-f / --force" to overwite') + return + + # init face landmark processors + if opt_detector_type == types.FaceLandmark2D_68.DLIB: + # use dlib 68 point detector + landmark_detector = face_landmarks.Dlib2D_68() + elif opt_detector_type == types.FaceLandmark2D_68.FACE_ALIGNMENT: + # use dlib 5 point detector + landmark_detector = face_landmarks.FaceAlignment2D_68() + else: + log.error('{} not yet implemented'.format(opt_detector_type.name)) + return + + log.info(f'Using landmark detector: {opt_detector_type.name}') + + # ------------------------------------------------------------------------- + # load filepath data + fp_record = data_store.metadata(types.Metadata.FILE_RECORD) + df_record = pd.read_csv(fp_record).set_index('index') + # load ROI data + fp_roi = data_store.metadata(types.Metadata.FACE_ROI) + df_roi = pd.read_csv(fp_roi).set_index('index') + # slice if you want + if opt_slice: + df_roi = df_roi[opt_slice[0]:opt_slice[1]] + # group by image index (speedup if multiple faces per image) + df_img_groups = df_roi.groupby('record_index') + log.debug('processing {:,} groups'.format(len(df_img_groups))) + + # store landmarks in list + results = [] + + # ------------------------------------------------------------------------- + # iterate groups with file/record index as key + + for record_index, df_img_group in tqdm(df_img_groups): + + # access file_record DataSeries + file_record = df_record.iloc[record_index] + + # load image + fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext) + im = cv.imread(fp_im) + im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) + dim = im_resized.shape[:2][::-1] + + # iterate ROIs in this image + for roi_index, df_img in df_img_group.iterrows(): + + # find landmarks + x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h # normalized values + #dim = (file_record.width, file_record.height) # original w,h + bbox = BBox.from_xywh(x, y, w, h).to_dim(dim) + points = landmark_detector.landmarks(im_resized, bbox) + points_norm = landmark_detector.normalize(points, dim) + points_str = landmark_detector.to_str(points_norm) + + # display if optioned + if opt_display: + dst = im_resized.copy() + draw_utils.draw_landmarks2D(dst, points) + draw_utils.draw_bbox(dst, bbox) + cv.imshow('', dst) + display_utils.handle_keyboard() + + # add to results for CSV + results.append({'vec': points_str, 'roi_index':roi_index}) + + + # create DataFrame and save to CSV + file_utils.mkdirs(fp_out) + df = pd.DataFrame.from_dict(results) + df.index.name = 'index' + df.to_csv(fp_out) + + # save script + file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file diff --git a/megapixels/commands/processor/face_landmark_3d_68.py b/megapixels/commands/processor/face_landmark_3d_68.py new file mode 100644 index 00000000..a2d14d72 --- /dev/null +++ b/megapixels/commands/processor/face_landmark_3d_68.py @@ -0,0 +1,147 @@ +""" + +""" + +import click + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg + +color_filters = {'color': 1, 'gray': 2, 'all': 3} + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', default=None, + help='Override enum input filename CSV') +@click.option('-o', '--output', 'opt_fp_out', default=None, + help='Override enum output filename CSV') +@click.option('-m', '--media', 'opt_dir_media', default=None, + help='Override enum media directory') +@click.option('--store', 'opt_data_store', + type=cfg.DataStoreVar, + 
default=click_utils.get_default(types.DataStore.HDD), + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--dataset', 'opt_dataset', + type=cfg.DatasetVar, + required=True, + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('-d', '--detector', 'opt_detector_type', + type=cfg.FaceLandmark3D_68Var, + default=click_utils.get_default(types.FaceLandmark3D_68.FACE_ALIGNMENT), + help=click_utils.show_help(types.FaceLandmark3D_68)) +@click.option('--size', 'opt_size', + type=(int, int), default=(300, 300), + help='Output image size') +@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), + help='Slice list of files') +@click.option('-f', '--force', 'opt_force', is_flag=True, + help='Force overwrite file') +@click.option('-d', '--display', 'opt_display', is_flag=True, + help='Display image for debugging') +@click.pass_context +def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type, + opt_size, opt_slice, opt_force, opt_display): + """Generate 3D 68-point landmarks""" + + import sys + import os + from os.path import join + from pathlib import Path + from glob import glob + + from tqdm import tqdm + import numpy as np + import cv2 as cv + import pandas as pd + + from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils + from app.utils import plot_utils + from app.processors import face_landmarks + from app.models.data_store import DataStore + from app.models.bbox import BBox + + # -------------------------------------------------------------------------- + # init here + + log = logger_utils.Logger.getLogger() + log.warn('not normalizing points') + # init filepaths + data_store = DataStore(opt_data_store, opt_dataset) + # set file output path + metadata_type = types.Metadata.FACE_LANDMARK_3D_68 + fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out + if not opt_force and Path(fp_out).exists(): + log.error('File exists. 
Use "-f / --force" to overwite') + return + + # init face landmark processors + if opt_detector_type == types.FaceLandmark3D_68.FACE_ALIGNMENT: + # use FaceAlignment 68 point 3D detector + landmark_detector = face_landmarks.FaceAlignment3D_68() + else: + log.error('{} not yet implemented'.format(opt_detector_type.name)) + return + + log.info(f'Using landmark detector: {opt_detector_type.name}') + + # ------------------------------------------------------------------------- + # load data + + fp_record = data_store.metadata(types.Metadata.FILE_RECORD) # file_record.csv + df_record = pd.read_csv(fp_record).set_index('index') + fp_roi = data_store.metadata(types.Metadata.FACE_ROI) # face_roi.csv + df_roi = pd.read_csv(fp_roi).set_index('index') + if opt_slice: + df_roi = df_roi[opt_slice[0]:opt_slice[1]] # slice if you want + df_img_groups = df_roi.groupby('record_index') # groups by image index (load once) + log.debug('processing {:,} groups'.format(len(df_img_groups))) + + # store landmarks in list + results = [] + + # iterate groups with file/record index as key + for record_index, df_img_group in tqdm(df_img_groups): + + # acces file record + ds_record = df_record.iloc[record_index] + + # load image + fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext) + im = cv.imread(fp_im) + im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) + + # iterate image group dataframe with roi index as key + for roi_index, df_img in df_img_group.iterrows(): + + # get bbox + x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h + dim = im_resized.shape[:2][::-1] + bbox = BBox.from_xywh(x, y, w, h).to_dim(dim) + + # get landmark points + points = landmark_detector.landmarks(im_resized, bbox) + # NB can't really normalize these points, but are normalized against 3D space + #points_norm = landmark_detector.normalize(points, dim) # normalized using 200 + points_flattenend = landmark_detector.flatten(points) + + # display to screen if optioned + if opt_display: + draw_utils.draw_landmarks3D(im_resized, points) + draw_utils.draw_bbox(im_resized, bbox) + cv.imshow('', im_resized) + display_utils.handle_keyboard() + + #plot_utils.generate_3d_landmark_anim(points, '/home/adam/Downloads/3d.gif') + + results.append(points_flattenend) + + # create DataFrame and save to CSV + file_utils.mkdirs(fp_out) + df = pd.DataFrame.from_dict(results) + df.index.name = 'index' + df.to_csv(fp_out) + + # save script + file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file diff --git a/megapixels/commands/processor/face_pose.py b/megapixels/commands/processor/face_pose.py new file mode 100644 index 00000000..cb7ec56c --- /dev/null +++ b/megapixels/commands/processor/face_pose.py @@ -0,0 +1,164 @@ +""" +NB: This only works with the DLIB 68-point landmarks. 
+ +Converts ROIs to pose: yaw, roll, pitch +pitch: looking down or up in yes gesture +roll: tilting head towards shoulder +yaw: twisting head left to right in no gesture + +""" + +""" +TODO +- check compatibility with MTCNN 68 point detector +- improve accuracy by using MTCNN 5-point +- refer to https://github.com/jerryhouuu/Face-Yaw-Roll-Pitch-from-Pose-Estimation-using-OpenCV/ +""" + +import click + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', default=None, + help='Override enum input filename CSV') +@click.option('-o', '--output', 'opt_fp_out', default=None, + help='Override enum output filename CSV') +@click.option('-m', '--media', 'opt_dir_media', default=None, + help='Override enum media directory') +@click.option('--store', 'opt_data_store', + type=cfg.DataStoreVar, + default=click_utils.get_default(types.DataStore.HDD), + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--dataset', 'opt_dataset', + type=cfg.DatasetVar, + required=True, + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--size', 'opt_size', + type=(int, int), default=(300, 300), + help='Output image size') +@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), + help='Slice list of files') +@click.option('-f', '--force', 'opt_force', is_flag=True, + help='Force overwrite file') +@click.option('-d', '--display', 'opt_display', is_flag=True, + help='Display image for debugging') +@click.pass_context +def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size, + opt_slice, opt_force, opt_display): + """Converts ROIs to pose: roll, yaw, pitch""" + + import sys + import os + from os.path import join + from pathlib import Path + from glob import glob + + from tqdm import tqdm + import numpy as np + import dlib # must keep a local reference for dlib + import cv2 as cv + import pandas as pd + + from app.models.bbox import BBox + from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils + from app.processors.face_landmarks import Dlib2D_68 + from app.processors.face_pose import FacePoseDLIB + from app.models.data_store import DataStore + + # ------------------------------------------------- + # init here + + log = logger_utils.Logger.getLogger() + + # set data_store + data_store = DataStore(opt_data_store, opt_dataset) + + # get filepath out + fp_out = data_store.metadata(types.Metadata.FACE_POSE) if opt_fp_out is None else opt_fp_out + if not opt_force and Path(fp_out).exists(): + log.error('File exists. 
Use "-f / --force" to overwite') + return + + # init face processors + face_pose = FacePoseDLIB() + face_landmarks = Dlib2D_68() + + # ------------------------------------------------- + # load data + + fp_record = data_store.metadata(types.Metadata.FILE_RECORD) + df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index') + # load ROI data + fp_roi = data_store.metadata(types.Metadata.FACE_ROI) + df_roi = pd.read_csv(fp_roi).set_index('index') + # slice if you want + if opt_slice: + df_roi = df_roi[opt_slice[0]:opt_slice[1]] + # group by image index (speedup if multiple faces per image) + df_img_groups = df_roi.groupby('record_index') + log.debug('processing {:,} groups'.format(len(df_img_groups))) + + # store poses and convert to DataFrame + results = [] + + # ------------------------------------------------- + # iterate groups with file/record index as key + for record_index, df_img_group in tqdm(df_img_groups): + + # access the file_record + file_record = df_record.iloc[record_index] # pands.DataSeries + + # load image + fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext) + im = cv.imread(fp_im) + im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) + + # iterate image group dataframe with roi index as key + for roi_index, df_img in df_img_group.iterrows(): + + # get bbox + x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h + #dim = (file_record.width, file_record.height) + dim = im_resized.shape[:2][::-1] + bbox_norm = BBox.from_xywh(x, y, w, h) + bbox_dim = bbox_norm.to_dim(dim) + + # get pose + landmarks = face_landmarks.landmarks(im_resized, bbox_norm) + pose_data = face_pose.pose(landmarks, dim) + #pose_degrees = pose_data['degrees'] # only keep the degrees data + #pose_degrees['points_nose'] = pose_data + + # draw landmarks if optioned + if opt_display: + draw_utils.draw_pose(im_resized, pose_data['point_nose'], pose_data['points']) + draw_utils.draw_degrees(im_resized, pose_data) + cv.imshow('', im_resized) + display_utils.handle_keyboard() + + # add image index and append to result CSV data + pose_data['roi_index'] = roi_index + for k, v in pose_data['points'].items(): + pose_data[f'point_{k}_x'] = v[0] / dim[0] + pose_data[f'point_{k}_y'] = v[1] / dim[1] + + # rearrange data structure for DataFrame + pose_data.pop('points') + pose_data['point_nose_x'] = pose_data['point_nose'][0] / dim[0] + pose_data['point_nose_y'] = pose_data['point_nose'][1] / dim[1] + pose_data.pop('point_nose') + results.append(pose_data) + + # create DataFrame and save to CSV + file_utils.mkdirs(fp_out) + df = pd.DataFrame.from_dict(results) + df.index.name = 'index' + df.to_csv(fp_out) + + # save script + file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file diff --git a/megapixels/commands/processor/face_roi.py b/megapixels/commands/processor/face_roi.py new file mode 100644 index 00000000..fc933049 --- /dev/null +++ b/megapixels/commands/processor/face_roi.py @@ -0,0 +1,187 @@ +""" +Crop images to prepare for training +""" + +import click +# from PIL import Image, ImageOps, ImageFilter, ImageDraw + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg + +color_filters = {'color': 1, 'gray': 2, 'all': 3} + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', default=None, + help='Override enum input filename CSV') +@click.option('-o', '--output', 'opt_fp_out', default=None, + help='Override enum output filename CSV') 
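Among the detector options declared here, --conf thresholds detection confidence and --zone constrains where a face center may sit in the frame. A sketch of how such a zone test could work, assuming zone is a (width, height) fraction of the frame centered on the image center (an illustration of the help text, not the verified detector internals):

def in_zone(cx, cy, zone=(0.5, 0.5)):
  # cx, cy: normalized face center; zone: centered (w, h) fraction of the frame
  # (0, 0) disables the test, matching the option default
  if not any(zone):
    return True
  return abs(cx - 0.5) <= zone[0] / 2 and abs(cy - 0.5) <= zone[1] / 2

# a face centered at (0.55, 0.48) passes a (0.5, 0.5) zone; one near the edge does not
assert in_zone(0.55, 0.48)
assert not in_zone(0.05, 0.5)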
+@click.option('-m', '--media', 'opt_dir_media', default=None, + help='Override enum media directory') +@click.option('--store', 'opt_data_store', + type=cfg.DataStoreVar, + default=click_utils.get_default(types.DataStore.HDD), + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--dataset', 'opt_dataset', + type=cfg.DatasetVar, + required=True, + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--size', 'opt_size', + type=(int, int), default=(480, 480), + help='Output image size') +@click.option('-d', '--detector', 'opt_detector_type', + type=cfg.FaceDetectNetVar, + default=click_utils.get_default(types.FaceDetectNet.CVDNN), + help=click_utils.show_help(types.FaceDetectNet)) +@click.option('-g', '--gpu', 'opt_gpu', default=0, + help='GPU index') +@click.option('--conf', 'opt_conf_thresh', default=0.85, type=click.FloatRange(0,1), + help='Confidence minimum threshold') +@click.option('-p', '--pyramids', 'opt_pyramids', default=0, type=click.IntRange(0,4), + help='Number pyramids to upscale for DLIB detectors') +@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), + help='Slice list of files') +@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False, + help='Display detections to debug') +@click.option('-f', '--force', 'opt_force', is_flag=True, + help='Force overwrite file') +@click.option('--color', 'opt_color_filter', + type=click.Choice(color_filters.keys()), default='color', + help='Filter to keep color or grayscale images (color = keep color') +@click.option('--keep', 'opt_largest', type=click.Choice(['largest', 'all']), default='largest', + help='Only keep largest face') +@click.option('--zone', 'opt_zone', default=(0.0, 0.0), type=(float, float), + help='Face center must be located within zone region (0.5 = half width/height)') +@click.pass_context +def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset, opt_size, opt_detector_type, + opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_force, opt_color_filter, + opt_largest, opt_zone): + """Converts frames with faces to CSV of ROIs""" + + import sys + import os + from os.path import join + from pathlib import Path + from glob import glob + + from tqdm import tqdm + import numpy as np + import dlib # must keep a local reference for dlib + import cv2 as cv + import pandas as pd + + from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils + from app.processors import face_detector + from app.models.data_store import DataStore + + # ------------------------------------------------- + # init here + + log = logger_utils.Logger.getLogger() + + # set data_store + data_store = DataStore(opt_data_store, opt_dataset) + + # get filepath out + fp_out = data_store.metadata(types.Metadata.FACE_ROI) if opt_fp_out is None else opt_fp_out + if not opt_force and Path(fp_out).exists(): + log.error('File exists. 
Use "-f / --force" to overwrite')
+    return
+
+  # set detector
+  if opt_detector_type == types.FaceDetectNet.CVDNN:
+    detector = face_detector.DetectorCVDNN()
+  elif opt_detector_type == types.FaceDetectNet.DLIB_CNN:
+    detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu)
+  elif opt_detector_type == types.FaceDetectNet.DLIB_HOG:
+    detector = face_detector.DetectorDLIBHOG()
+  elif opt_detector_type == types.FaceDetectNet.MTCNN_TF:
+    detector = face_detector.DetectorMTCNN_TF(gpu=opt_gpu)
+  elif opt_detector_type == types.FaceDetectNet.HAAR:
+    log.error('{} not yet implemented'.format(opt_detector_type.name))
+    return
+
+
+  # get list of files to process
+  fp_record = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
+  df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
+  if opt_slice:
+    df_record = df_record[opt_slice[0]:opt_slice[1]]
+  log.debug('processing {:,} files'.format(len(df_record)))
+
+  # filter out grayscale
+  color_filter = color_filters[opt_color_filter]
+  # set largest flag, to keep all or only largest
+  opt_largest = (opt_largest == 'largest')
+
+  data = []
+  skipped_files = []
+  processed_files = []
+
+  for record in tqdm(df_record.itertuples(), total=len(df_record)):
+    fp_im = data_store.face(str(record.subdir), str(record.fn), str(record.ext))
+    try:
+      im = cv.imread(fp_im)
+      im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+    except Exception as e:
+      log.debug(f'could not read: {fp_im}')
+      continue
+    # filter out color or grayscale images
+    if color_filter != color_filters['all']:
+      try:
+        is_gray = im_utils.is_grayscale(im)
+        if is_gray and color_filter != color_filters['gray']:
+          log.debug('Skipping grayscale image: {}'.format(fp_im))
+          continue
+      except Exception as e:
+        log.error('Could not check grayscale: {}'.format(fp_im))
+        continue
+
+    try:
+      bboxes_norm = detector.detect(im_resized, pyramids=opt_pyramids, largest=opt_largest,
+        zone=opt_zone, conf_thresh=opt_conf_thresh)
+    except Exception as e:
+      log.error('could not detect: {}'.format(fp_im))
+      log.error('{}'.format(e))
+      continue
+
+    if len(bboxes_norm) == 0:
+      skipped_files.append(fp_im)
+      log.warn(f'no faces in: {fp_im}')
+      log.warn(f'skipped: {len(skipped_files)}. found: {len(processed_files)} files')
+    else:
+      processed_files.append(fp_im)
+      for bbox in bboxes_norm:
+        roi = {
+          'record_index': int(record.Index),
+          'x': bbox.x,
+          'y': bbox.y,
+          'w': bbox.w,
+          'h': bbox.h
+        }
+        data.append(roi)
+
+    # if display optioned
+    if opt_display and len(bboxes_norm):
+      dim = im_resized.shape[:2][::-1]
+      if dim[0] > 1000:
+        im_resized = im_utils.resize(im_resized, width=1000)
+      # draw each box
+      for bbox_norm in bboxes_norm:
+        im_resized = draw_utils.draw_bbox(im_resized, bbox_norm)
+
+      # display and wait
+      cv.imshow('', im_resized)
+      display_utils.handle_keyboard()
+
+  # create DataFrame and save to CSV
+  file_utils.mkdirs(fp_out)
+  df = pd.DataFrame.from_dict(data)
+  df.index.name = 'index'
+  df.to_csv(fp_out)
+
+  # save script
+  file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out))
\ No newline at end of file
diff --git a/megapixels/commands/processor/face_vector.py b/megapixels/commands/processor/face_vector.py
new file mode 100644
index 00000000..cb155d08
--- /dev/null
+++ b/megapixels/commands/processor/face_vector.py
@@ -0,0 +1,133 @@
+"""
+Converts ROIs to face vectors
+NB: the VGG Face2 extractor should be used with MTCNN ROIs (not square);
+  the DLIB face extractor should be used with DLIB ROIs (square)
+see https://github.com/ox-vgg/vgg_face2 for TAR@FAR
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+  help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+  help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+  type=cfg.DataStoreVar,
+  default=click_utils.get_default(types.DataStore.HDD),
+  show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+  type=cfg.DatasetVar,
+  required=True,
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+  type=(int, int), default=cfg.DEFAULT_SIZE_FACE_DETECT,
+  help='Output image size')
+@click.option('-e', '--extractor', 'opt_extractor',
+  default=click_utils.get_default(types.FaceExtractor.VGG),
+  type=cfg.FaceExtractorVar,
+  help='Type of extractor framework/network to use')
+@click.option('-j', '--jitters', 'opt_jitters', default=cfg.DLIB_FACEREC_JITTERS,
+  help='Number of jitters (only for DLIB)')
+@click.option('-p', '--padding', 'opt_padding', default=cfg.FACEREC_PADDING,
+  help='Percentage ROI padding')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+  help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+  help='Force overwrite file')
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+  help='GPU index')
+@click.pass_context
+def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
+    opt_extractor, opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding):
+  """Converts face ROIs to vectors"""
+
+  import sys
+  import os
+  from os.path import join
+  from pathlib import Path
+  from glob import glob
+
+  from tqdm import tqdm
+  import numpy as np
+  import dlib  # must keep a local reference for dlib
+  import cv2 as cv
+  import pandas as pd
+
+  from app.models.bbox import BBox
+  from app.models.data_store import DataStore
+  from app.utils import logger_utils, file_utils, im_utils
+  from app.processors import face_extractor
+
+
+  # 
------------------------------------------------- + # init here + + log = logger_utils.Logger.getLogger() + # set data_store + data_store = DataStore(opt_data_store, opt_dataset) + + # get filepath out + fp_out = data_store.metadata(types.Metadata.FACE_VECTOR) if opt_fp_out is None else opt_fp_out + if not opt_force and Path(fp_out).exists(): + log.error('File exists. Use "-f / --force" to overwite') + return + + # init face processors + if opt_extractor == types.FaceExtractor.DLIB: + log.debug('set dlib') + extractor = face_extractor.ExtractorDLIB(gpu=opt_gpu, jitters=opt_jitters) + elif opt_extractor == types.FaceExtractor.VGG: + extractor = face_extractor.ExtractorVGG() + + # load data + fp_record = data_store.metadata(types.Metadata.FILE_RECORD) + df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index') + fp_roi = data_store.metadata(types.Metadata.FACE_ROI) + df_roi = pd.read_csv(fp_roi).set_index('index') + + if opt_slice: + df_roi = df_roi[opt_slice[0]:opt_slice[1]] + + # ------------------------------------------------- + # process images + + df_img_groups = df_roi.groupby('record_index') + log.debug('processing {:,} groups'.format(len(df_img_groups))) + + vecs = [] + for record_index, df_img_group in tqdm(df_img_groups): + # make fp + ds_record = df_record.iloc[record_index] + fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext) + im = cv.imread(fp_im) + im = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) + for roi_index, df_img in df_img_group.iterrows(): + # get bbox + x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h + dim = (ds_record.width, ds_record.height) + # get face vector + bbox = BBox.from_xywh(x, y, w, h) # norm + # compute vec + vec = extractor.extract(im, bbox) # use normalized BBox + vec_str = extractor.to_str(vec) + vec_obj = {'vec':vec_str, 'roi_index': roi_index, 'record_index':record_index} + vecs.append(vec_obj) + + # ------------------------------------------------- + # save data + + # create DataFrame and save to CSV + df = pd.DataFrame.from_dict(vecs) + df.index.name = 'index' + file_utils.mkdirs(fp_out) + df.to_csv(fp_out) + + # save script + file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file diff --git a/megapixels/commands/processor/mirror.py b/megapixels/commands/processor/mirror.py new file mode 100644 index 00000000..9ca1cac7 --- /dev/null +++ b/megapixels/commands/processor/mirror.py @@ -0,0 +1,57 @@ +""" +Crop images to prepare for training +""" + +import click +import cv2 as cv +from PIL import Image, ImageOps, ImageFilter + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg + + +@click.command() +@click.option('-i', '--input', 'opt_dir_in', required=True, + help='Input directory') +@click.option('-o', '--output', 'opt_dir_out', required=True, + help='Output directory') +@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), + help='Slice the input list') +@click.pass_context +def cli(ctx, opt_dir_in, opt_dir_out, opt_slice): + """Mirror augment image directory""" + + import os + from os.path import join + from pathlib import Path + from glob import glob + from tqdm import tqdm + + from app.utils import logger_utils, file_utils, im_utils + + # ------------------------------------------------- + # init + + log = logger_utils.Logger.getLogger() + + # ------------------------------------------------- + # process here + + # get list of files to process + fp_ims = 
diff --git a/megapixels/commands/processor/mirror.py b/megapixels/commands/processor/mirror.py
new file mode 100644
index 00000000..9ca1cac7
--- /dev/null
+++ b/megapixels/commands/processor/mirror.py
@@ -0,0 +1,57 @@
+"""
+Mirror-augment images to prepare for training
+"""
+
+import click
+from PIL import Image, ImageOps, ImageFilter
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_dir_in', required=True,
+  help='Input directory')
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+  help='Output directory')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+  help='Slice the input list')
+@click.pass_context
+def cli(ctx, opt_dir_in, opt_dir_out, opt_slice):
+  """Mirror augment image directory"""
+
+  import os
+  from os.path import join
+  from pathlib import Path
+  from glob import glob
+  from tqdm import tqdm
+
+  from app.utils import logger_utils, file_utils, im_utils
+
+  # -------------------------------------------------
+  # init
+
+  log = logger_utils.Logger.getLogger()
+
+  # -------------------------------------------------
+  # process here
+
+  # get list of files to process
+  fp_ims = glob(join(opt_dir_in, '*.jpg'))
+  fp_ims += glob(join(opt_dir_in, '*.png'))
+
+  if opt_slice:
+    fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+  log.info('processing {:,} files'.format(len(fp_ims)))
+
+  # ensure output dir exists
+  file_utils.mkdirs(opt_dir_out)
+
+  # mirror and save images
+  for fp_im in tqdm(fp_ims):
+    im = Image.open(fp_im)
+    im = ImageOps.mirror(im)  # horizontal (left-right) flip
+    fpp_im = Path(fp_im)
+    fp_out = join(opt_dir_out, '{}_mirror{}'.format(fpp_im.stem, fpp_im.suffix))
+    im.save(fp_out)
\ No newline at end of file
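For reference, ImageOps.mirror is a left-right flip, equivalent to np.fliplr on the pixel array; a self-contained check:

import numpy as np
from PIL import Image, ImageOps

im = Image.fromarray(np.arange(12, dtype=np.uint8).reshape(3, 4))
assert np.array_equal(np.asarray(ImageOps.mirror(im)), np.fliplr(np.asarray(im)))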
diff --git a/megapixels/commands/processor/resize.py b/megapixels/commands/processor/resize.py
new file mode 100644
index 00000000..7409ee6f
--- /dev/null
+++ b/megapixels/commands/processor/resize.py
@@ -0,0 +1,150 @@
+"""
+Resize and crop images to prepare for training
+"""
+
+import click
+import cv2 as cv
+from PIL import Image, ImageOps, ImageFilter
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+"""
+Filter    Q-Down    Q-Up      Speed
+NEAREST                       ⭐⭐⭐⭐⭐
+BOX       ⭐                  ⭐⭐⭐⭐
+BILINEAR  ⭐        ⭐        ⭐⭐⭐
+HAMMING   ⭐⭐                ⭐⭐⭐
+BICUBIC   ⭐⭐⭐    ⭐⭐⭐    ⭐⭐
+LANCZOS   ⭐⭐⭐⭐  ⭐⭐⭐⭐  ⭐
+"""
+methods = {
+  'lanczos': Image.LANCZOS,
+  'bicubic': Image.BICUBIC,
+  'hamming': Image.HAMMING,
+  'bilinear': Image.BILINEAR,
+  'box': Image.BOX,
+  'nearest': Image.NEAREST
+}
+centerings = {
+  'tl': (0.0, 0.0),
+  'tc': (0.5, 0.0),
+  'tr': (1.0, 0.0),
+  'lc': (0.0, 0.5),
+  'cc': (0.5, 0.5),
+  'rc': (1.0, 0.5),
+  'bl': (0.0, 1.0),
+  'bc': (0.5, 1.0),
+  'br': (1.0, 1.0)
+}
+
+@click.command()
+@click.option('-i', '--input', 'opt_dir_in', required=True,
+  help='Input directory')
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+  help='Output directory')
+@click.option('-e', '--ext', 'opt_glob_ext',
+  default='png', type=click.Choice(['jpg', 'png']),
+  help='File glob ext')
+@click.option('--size', 'opt_size',
+  type=(int, int), default=(256, 256),
+  help='Max output size')
+@click.option('--method', 'opt_scale_method',
+  type=click.Choice(methods.keys()),
+  default='lanczos',
+  help='Scaling method to use')
+@click.option('--equalize', 'opt_equalize', is_flag=True,
+  help='Equalize histogram')
+@click.option('--sharpen', 'opt_sharpen', is_flag=True,
+  help='Unsharp mask')
+@click.option('--center', 'opt_center', default='cc', type=click.Choice(centerings.keys()),
+  help='Crop focal point')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+  help='Slice the input list')
+@click.option('-t', '--threads', 'opt_threads', default=8,
+  help='Number of threads')
+@click.pass_context
+def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method,
+  opt_equalize, opt_sharpen, opt_center, opt_slice, opt_threads):
+  """Resize and crop images"""
+
+  import os
+  from os.path import join
+  from pathlib import Path
+  from glob import glob
+  from tqdm import tqdm
+  from multiprocessing.dummy import Pool as ThreadPool
+  from functools import partial
+
+  from app.utils import logger_utils, file_utils, im_utils
+
+  # -------------------------------------------------
+  # init
+
+  log = logger_utils.Logger.getLogger()
+
+  # -------------------------------------------------
+  # process here
+
+  def pool_resize(fp_im, opt_size, scale_method, centering):
+    # Threaded image resize function
+    pbar.update(1)
+    try:
+      # verify() must run on a freshly opened file, before any load/convert
+      Image.open(fp_im).verify()
+      im = Image.open(fp_im).convert('RGB')
+    except Exception as e:
+      log.warn('Could not open: {}'.format(fp_im))
+      log.error(e)
+      return False
+
+    # scale then crop to the exact target size
+    im = ImageOps.fit(im, opt_size, method=scale_method, centering=centering)
+
+    if opt_equalize:
+      im_np = im_utils.pil2np(im)
+      im_np_eq = eq_hist_yuv(im_np)
+      im_np = cv.addWeighted(im_np_eq, 0.35, im_np, 0.65, 0)
+      im = im_utils.np2pil(im_np)
+
+    if opt_sharpen:
+      im = im.filter(ImageFilter.UnsharpMask)
+
+    fp_out = join(opt_dir_out, Path(fp_im).name)
+    im.save(fp_out)
+    return True
+
+  centering = centerings[opt_center]
+  scale_method = methods[opt_scale_method]
+
+  # get list of files to process
+  fp_ims = glob(join(opt_dir_in, '*.{}'.format(opt_glob_ext)))
+  if opt_slice:
+    fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+  log.info('processing {:,} files'.format(len(fp_ims)))
+
+  # ensure output dir exists
+  file_utils.mkdirs(opt_dir_out)
+
+  # setup multithreading with fixed arguments for the pool function
+  pool_resize = partial(pool_resize, opt_size=opt_size, scale_method=scale_method, centering=centering)
+  pool = ThreadPool(opt_threads)
+  with tqdm(total=len(fp_ims)) as pbar:
+    results = pool.map(pool_resize, fp_ims)
+  pool.close()
+  pool.join()
+
+  log.info('Resized: {} / {} images'.format(results.count(True), len(fp_ims)))
+
+
+def eq_hist_yuv(im):
+  im_yuv = cv.cvtColor(im, cv.COLOR_BGR2YUV)
+  im_yuv[:,:,0] = cv.equalizeHist(im_yuv[:,:,0])
+  return cv.cvtColor(im_yuv, cv.COLOR_YUV2BGR)
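The ImageOps.fit call restored above scales and then crops to the exact target size; centering picks the crop focal point that the --center option maps to. A short sketch with synthetic input:

from PIL import Image, ImageOps

im = Image.new('RGB', (1920, 1080))
im_cc = ImageOps.fit(im, (256, 256), method=Image.LANCZOS, centering=(0.5, 0.5))  # 'cc'
im_tl = ImageOps.fit(im, (256, 256), method=Image.LANCZOS, centering=(0.0, 0.0))  # 'tl'
assert im_cc.size == im_tl.size == (256, 256)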
diff --git a/megapixels/commands/processor/resize_dataset.py b/megapixels/commands/processor/resize_dataset.py
new file mode 100644
index 00000000..3a6ec15f
--- /dev/null
+++ b/megapixels/commands/processor/resize_dataset.py
@@ -0,0 +1,149 @@
+"""
+Resize dataset images to prepare for training
+"""
+
+import click
+import cv2 as cv
+from PIL import Image, ImageOps, ImageFilter
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+cv_resize_algos = {
+  'area': cv.INTER_AREA,
+  'lanco': cv.INTER_LANCZOS4,
+  'linear': cv.INTER_LINEAR,
+  'linear_exact': cv.INTER_LINEAR_EXACT,
+  'nearest': cv.INTER_NEAREST
+}
+"""
+Filter    Q-Down    Q-Up      Speed
+NEAREST                       ⭐⭐⭐⭐⭐
+BOX       ⭐                  ⭐⭐⭐⭐
+BILINEAR  ⭐        ⭐        ⭐⭐⭐
+HAMMING   ⭐⭐                ⭐⭐⭐
+BICUBIC   ⭐⭐⭐    ⭐⭐⭐    ⭐⭐
+LANCZOS   ⭐⭐⭐⭐  ⭐⭐⭐⭐  ⭐
+"""
+pil_resize_algos = {
+  'antialias': Image.ANTIALIAS,
+  'lanczos': Image.LANCZOS,
+  'bicubic': Image.BICUBIC,
+  'hamming': Image.HAMMING,
+  'bilinear': Image.BILINEAR,
+  'box': Image.BOX,
+  'nearest': Image.NEAREST
+}
+
+@click.command()
+@click.option('--dataset', 'opt_dataset',
+  type=cfg.DatasetVar,
+  required=True,
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('--store', 'opt_data_store',
+  type=cfg.DataStoreVar,
+  default=click_utils.get_default(types.DataStore.HDD),
+  show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+  help='Output directory')
+@click.option('-e', '--ext', 'opt_glob_ext',
+  default='png', type=click.Choice(['jpg', 'png']),
+  help='File glob ext')
+@click.option('--size', 'opt_size',
+  type=(int, int), default=(256, 256),
+  help='Output image size max (w,h)')
+@click.option('--interp', 'opt_interp_algo',
+  type=click.Choice(pil_resize_algos.keys()),
+  default='bicubic',
+  help='Interpolation resizing algorithm')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+  help='Slice the input list')
+@click.option('-t', '--threads', 'opt_threads', default=8,
+  help='Number of threads')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+  help='Use glob recursion (slower)')
+@click.pass_context
+def cli(ctx, opt_dataset, opt_data_store, opt_dir_out, opt_glob_ext, opt_size, opt_interp_algo,
+  opt_slice, opt_threads, opt_recursive):
+  """Resize dataset images"""
+
+  import os
+  from os.path import join
+  from pathlib import Path
+  from glob import glob
+  from tqdm import tqdm
+  from multiprocessing.dummy import Pool as ThreadPool
+  from functools import partial
+  import pandas as pd
+  import numpy as np
+
+  from app.utils import logger_utils, file_utils, im_utils
+  from app.models.data_store import DataStore
+
+  # -------------------------------------------------
+  # init
+
+  log = logger_utils.Logger.getLogger()
+
+  # -------------------------------------------------
+  # process here
+
+  def pool_resize(fp_in, dir_in, dir_out, im_size, interp_algo):
+    # Threaded image resize function
+    pbar.update(1)
+    try:
+      Image.open(fp_in).verify()  # throws if corrupt; must precede any load
+      im = Image.open(fp_in).convert('RGB')
+      im.thumbnail(im_size, interp_algo)
+      fp_out = fp_in.replace(dir_in, dir_out)
+      file_utils.mkdirs(fp_out)
+      im.save(fp_out, quality=100)
+    except Exception as e:
+      log.warn(f'Could not open: {fp_in}, Error: {e}')
+      return False
+    return True
+
+  data_store = DataStore(opt_data_store, opt_dataset)
+  fp_records = data_store.metadata(types.Metadata.FILE_RECORD)
+  df_records = pd.read_csv(fp_records, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
+  dir_in = data_store.media_images_original()
+
+  # get list of files to process
+  #fp_ims = file_utils.glob_multi(opt_dir_in, ['jpg', 'png'], recursive=opt_recursive)
+  fp_ims = []
+  for ds_record in df_records.itertuples():
+    fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+    fp_ims.append(fp_im)
+
+  if opt_slice:
+    fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+  if not fp_ims:
+    log.error('No images. Try with "--recursive"')
+    return
+  log.info(f'processing {len(fp_ims):,} images')
+
+  # algorithm to use for resizing
+  interp_algo = pil_resize_algos[opt_interp_algo]
+  log.info(f'using {opt_interp_algo} for interpolation')
+
+  # ensure output dir exists
+  file_utils.mkdirs(opt_dir_out)
+
+  # setup multithreading with fixed arguments for the pool function
+  map_pool_resize = partial(pool_resize, dir_in=dir_in, dir_out=opt_dir_out, im_size=opt_size, interp_algo=interp_algo)
+  pool = ThreadPool(opt_threads)
+  with tqdm(total=len(fp_ims)) as pbar:
+    results = pool.map(map_pool_resize, fp_ims)
+  pool.close()
+  pool.join()
+
+  log.info(f'Resized: {results.count(True)} / {len(fp_ims)} images')
\ No newline at end of file
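Unlike the ImageOps.fit call in resize.py, Image.thumbnail resizes in place, preserves aspect ratio, and only ever shrinks, which is why --size is documented as a max (w,h). A quick illustration:

from PIL import Image

im = Image.new('RGB', (1920, 1080))
im.thumbnail((256, 256), Image.BICUBIC)
print(im.size)  # (256, 144), not a distorted 256x256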
diff --git a/megapixels/commands/processor/videos_to_frames.py b/megapixels/commands/processor/videos_to_frames.py
new file mode 100644
index 00000000..0b56c46a
--- /dev/null
+++ b/megapixels/commands/processor/videos_to_frames.py
@@ -0,0 +1,73 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+
+import dlib
+import pandas as pd
+from PIL import Image, ImageOps, ImageFilter
+from app.utils import file_utils, im_utils
+
+
+log = logger_utils.Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+  help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out', required=True,
+  help='Output directory')
+@click.option('--size', 'opt_size', default=(320, 240),
+  help='Inference size for face detection')
+@click.option('--interval', 'opt_frame_interval', default=20,
+  help='Number of frames before saving next face')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_size, opt_frame_interval):
+  """Converts videos to frames with faces"""
+
+  # -------------------------------------------------
+  # process
+
+  from tqdm import tqdm
+  import cv2 as cv
+  from app.processors import face_detector
+
+  detector = face_detector.DetectorDLIBCNN()
+
+  # get file list
+  fp_videos = glob(join(opt_fp_in, '*.mp4'))
+  fp_videos += glob(join(opt_fp_in, '*.webm'))
+  fp_videos += glob(join(opt_fp_in, '*.mkv'))
+
+  frame_interval_count = 0
+  frame_count = 0
+
+  file_utils.mkdirs(opt_fp_out)
+
+  for fp_video in tqdm(fp_videos):
+
+    video = cv.VideoCapture(fp_video)
+
+    while video.isOpened():
+      res, frame = video.read()
+      if not res:
+        break
+
+      frame_count += 1  # for naming
+      frame_interval_count += 1  # for interval
+
+      bboxes = detector.detect(frame, opt_size=opt_size, opt_pyramids=0)
+      if len(bboxes) > 0 and frame_interval_count >= opt_frame_interval:
+        # save frame
+        fname = file_utils.zpad(frame_count)
+        fp_frame = join(opt_fp_out, '{}_{}.jpg'.format(Path(fp_video).stem, fname))
+        cv.imwrite(fp_frame, frame)
+        frame_interval_count = 0
+
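--interval counts raw frames, so the wall-clock spacing between saved faces varies with the source frame rate. A hedged sketch of deriving the interval from seconds via OpenCV's reported FPS; the video path here is illustrative:

import cv2 as cv

video = cv.VideoCapture('input.mp4')  # illustrative path
fps = video.get(cv.CAP_PROP_FPS) or 30.0  # some containers report 0
seconds_between_saves = 2.0
frame_interval = max(1, int(round(fps * seconds_between_saves)))
print(f'save at most one frame every {frame_interval} frames')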
\" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " fig.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " 
$(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
')\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " event.shiftKey = false;\n", + " // Send a \"J\" for go to next cell\n", + " event.which = 74;\n", + " event.keyCode = 74;\n", + " manager.command_mode();\n", + " manager.handle_keydown(event);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // IPython event is triggered only after the cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(68.0, 201.0) (110.0, 225.0) (-61.021374, 41.419292)\n" + ] + } + ], "source": [ "im = cv.imread(fp_im)\n", "im_rgb = cv.cvtColor(im, cv.COLOR_BGR2RGB)\n", @@ -245,20 +1845,29 @@ "generate_3d_face_plain(im_rgb, lm)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```\n", + "\"RGB:0/51/153 (hexadecimal: 003399)\" for \"PANTONE REFLEX BLUE\" and \"RGB:255/204/0 (hexadecimal: FFCC00)\" for \"PANTONE YELLOW\" for the web palette (the limited 12\n", + 
"```" + ] + }, { "cell_type": "code", - "execution_count": null, + "execution_count": 26, "metadata": {}, "outputs": [], "source": [ " # line weight\n", - "def generate_3d_face(lm, fp_out, num_frames=30, dpi=72, stroke_weight=2, size=(480,480),\n", - " mark_size=10, mark_type='.', mark_clr=(0,255,0), fps=10, transparent=False):\n", + "def generate_3d_face(lm, fp_out, num_frames=30, dpi=72, stroke_weight=0, size=(480,480),\n", + " mark_size=2, mark_type='*', mark_clr=(0,255,0), fps=10, transparent=False):\n", " '''Generates 3D plot of face landmarks\n", " '''\n", " # convert opencv BGR numpy image to RGB\n", - " bg_color = '#%02x%02x%02x' % (0,0,0)\n", - " mark_clr = '#%02x%02x%02x' % (0,255,255)\n", + " bg_color = '#%02x%02x%02x' % (0,51,153)\n", + " mark_clr = '#%02x%02x%02x' % (255,204,0)\n", " \n", " # scale to make larger\n", " #lm = np.array([1.2*x,y,z] for x,y,z in list(lm))\n", @@ -293,27 +1902,27 @@ " \n", " # scatter plot the dots\n", " # jaw line\n", - " mark_clr = '#%02x%02x%02x' % (0,255,0) # green\n", + " #mark_clr = '#%02x%02x%02x' % (0,255,0) # green\n", " ax.plot3D(lm[:17,0]*1.2,lm[:17,1], lm[:17,2],\n", " marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", " # stage-right eyebrow\n", - " mark_clr = '#%02x%02x%02x' % (255,0,0) # green\n", + " #mark_clr = '#%02x%02x%02x' % (255,0,0) # green\n", " ax.plot3D(lm[17:22,0]*1.2,lm[17:22,1],lm[17:22,2],\n", " marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", " # stage-left eyebrow\n", - " mark_clr = '#%02x%02x%02x' % (255,255,0) # yellow\n", + " #mark_clr = '#%02x%02x%02x' % (255,255,0) # yellow\n", " ax.plot3D(lm[22:27,0]*1.2,lm[22:27,1],lm[22:27,2], \n", " marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", " # nose ridge\n", - " mark_clr = '#%02x%02x%02x' % (0,0,255) # blue\n", + " #mark_clr = '#%02x%02x%02x' % (0,0,255) # blue\n", " ax.plot3D(lm[27:31,0]*1.2,lm[27:31,1],lm[27:31,2],\n", " marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", " # nose-bottom\n", - " mark_clr = '#%02x%02x%02x' % (255,0,255) # magenta\n", + " #mark_clr = '#%02x%02x%02x' % (255,0,255) # magenta\n", " ax.plot3D(lm[31:36,0]*1.2,lm[31:36,1],lm[31:36,2],\n", " marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", " # stage-left eye\n", - " mark_clr = '#%02x%02x%02x' % (0,255,255) # cyan\n", + " #mark_clr = '#%02x%02x%02x' % (0,255,255) # cyan\n", " px, py, pz = lm[36:42,0]*1.2,lm[36:42,1],lm[36:42,2]\n", " px = np.append(px, lm[36,0]*1.2)\n", " py = np.append(py, lm[36,1])\n", @@ -321,7 +1930,7 @@ " ax.plot3D(px, py, pz, marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", " \n", " # stage-right eye\n", - " mark_clr = '#%02x%02x%02x' % (255,255,255) # white\n", + " #mark_clr = '#%02x%02x%02x' % (255,255,255) # white\n", " px, py, pz = lm[42:48,0]*1.2,lm[42:48,1],lm[42:48,2]\n", " px = np.append(px, lm[42,0]*1.2)\n", " py = np.append(py, lm[42,1])\n", @@ -329,7 +1938,7 @@ " ax.plot3D(px, py, pz, marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", " \n", " # mouth\n", - " mark_clr = '#%02x%02x%02x' % (255,125,0) # orange?\n", + " #mark_clr = '#%02x%02x%02x' % (255,125,0) # orange?\n", " px, py, pz = lm[48:,0]*1.2,lm[48:,1],lm[48:,2]\n", " px = np.append(px, lm[48,0]*1.2)\n", " py = np.append(py, lm[48,1])\n", @@ -371,11 +1980,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 27, "metadata": { "scrolled": 
false
   },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "time: 0.0018\n",
+      "Saved file to /home/adam/Downloads/0012_01.gif\n"
+     ]
+    }
+   ],
    "source": [
     "# filepaths\n",
     "dir_out = '/home/adam/Downloads/'\n",
diff --git a/megapixels/notebooks/face_analysis/3d_face_plot_cpdp.ipynb b/megapixels/notebooks/face_analysis/3d_face_plot_cpdp.ipynb
new file mode 100644
index 00000000..ba6f97b1
--- /dev/null
+++ b/megapixels/notebooks/face_analysis/3d_face_plot_cpdp.ipynb
@@ -0,0 +1,2967 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 3D Face Plot\n",
+    "\n",
+    "Attention visualization"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%load_ext autoreload\n",
+    "%autoreload 2\n",
+    "import os\n",
+    "from os.path import join\n",
+    "import sys\n",
+    "import time\n",
+    "from random import randint\n",
+    "import random\n",
+    "\n",
+    "import cv2 as cv\n",
+    "import numpy as np\n",
+    "import imutils\n",
+    "import matplotlib.animation\n",
+    "%matplotlib notebook\n",
+    "from glob import glob\n",
+    "from matplotlib import cbook\n",
+    "from matplotlib import cm\n",
+    "#from matplotlib.colors import LightSource\n",
+    "import face_alignment\n",
+    "\n",
+    "from mpl_toolkits.mplot3d import Axes3D\n",
+    "import matplotlib.pyplot as plt\n",
+    "import mpl_toolkits.mplot3d.axes3d as p3\n",
+    "from matplotlib import animation\n",
+    "\n",
+    "from skimage import io\n",
+    "from tqdm import tqdm_notebook as tqdm\n",
+    "from IPython.display import clear_output\n",
+    "from pathlib import Path\n",
+    "\n",
+    "sys.path.append('/work/megapixels_dev/megapixels/')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Generate random hex colors\n",
+    "def rhex():\n",
+    "    r = lambda: random.randint(0,255)\n",
+    "    return '#%02X%02X%02X' % (r(), r(), r())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# init 3d face\n",
+    "# Run the 3D face alignment on a test image, with CUDA\n",
+    "fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, device='cuda:0', flip_input=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 159,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fp_im = '/home/adam/Nextcloud/files-ahprojects-com/ahprojects/cpdp_politicians/may/may_1.jpg'\n",
+    "im = cv.imread(fp_im)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 160,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def generate_3d_face_plain(im, lm):\n",
+    "    preds = lm\n",
+    "    fig = plt.figure(figsize=plt.figaspect(.5))\n",
+    "    ax = fig.add_subplot(1, 2, 1)\n",
+    "    ax.imshow(im)\n",
+    "    ax.plot(preds[0:17,0],preds[0:17,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n",
+    "    ax.plot(preds[17:22,0],preds[17:22,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n",
+    "    ax.plot(preds[22:27,0],preds[22:27,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n",
+    "    ax.plot(preds[27:31,0],preds[27:31,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n",
+    "    ax.plot(preds[31:36,0],preds[31:36,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n",
+    "    ax.plot(preds[36:42,0],preds[36:42,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n",
+    "    ax.plot(preds[42:48,0],preds[42:48,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n",
+    "    
ax.plot(preds[48:60,0],preds[48:60,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n", + " ax.plot(preds[60:68,0],preds[60:68,1],marker='o',markersize=6,linestyle='-',color='w',lw=2) \n", + " ax.axis('off')\n", + "\n", + " ax = fig.add_subplot(1, 2, 2, projection='3d')\n", + " surf = ax.scatter(preds[:,0]*1.2,preds[:,1],preds[:,2],c=\"cyan\", alpha=1.0, edgecolor='b')\n", + " ax.plot3D(preds[:17,0]*1.2,preds[:17,1], preds[:17,2], color='blue' )\n", + " ax.plot3D(preds[17:22,0]*1.2,preds[17:22,1],preds[17:22,2], color='blue')\n", + " ax.plot3D(preds[22:27,0]*1.2,preds[22:27,1],preds[22:27,2], color='blue')\n", + " ax.plot3D(preds[27:31,0]*1.2,preds[27:31,1],preds[27:31,2], color='blue')\n", + " ax.plot3D(preds[31:36,0]*1.2,preds[31:36,1],preds[31:36,2], color='blue')\n", + " ax.plot3D(preds[36:42,0]*1.2,preds[36:42,1],preds[36:42,2], color='blue')\n", + " ax.plot3D(preds[42:48,0]*1.2,preds[42:48,1],preds[42:48,2], color='blue')\n", + " ax.plot3D(preds[48:,0]*1.2,preds[48:,1],preds[48:,2], color='blue' )\n", + " \n", + " # pad\n", + " xmm = (np.min(lm[:,0]),np.max(lm[:,0]))\n", + " ymm = (np.min(lm[:,1]),np.max(lm[:,1]))\n", + " zmm = (np.min(lm[:,2]),np.max(lm[:,2]))\n", + " \n", + " print(xmm, ymm, zmm)\n", + "# ax.set_xticks([])\n", + "# ax.set_yticks([])\n", + "# ax.set_zticks([])\n", + " plt.setp( ax.get_xticklabels(), visible=False)\n", + " plt.setp( ax.get_yticklabels(), visible=False)\n", + " #ax.set_xlim(xmm[0]-50, xmm[1]+50)\n", + " #ax.set_ylim(ymm[0]-50, ymm[1]+50)\n", + " #ax.set_ylim(zmm[0]- .1*zmm[0],zmm[1] + .1*zmm[1])\n", + " #ax.set_ylim(103, 275)\n", + " #ax.set_zlim((-100,100))\n", + " ax.view_init(elev=15., azim=135.)\n", + "\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 161, + "metadata": {}, + "outputs": [], + "source": [ + "from app.utils import im_utils" + ] + }, + { + "cell_type": "code", + "execution_count": 162, + "metadata": {}, + "outputs": [], + "source": [ + "im = cv.imread(fp_im)\n", + "im_resized = im_utils.resize(im, width=600, height=600)\n", + "im_rgb = cv.cvtColor(im, cv.COLOR_BGR2RGB)" + ] + }, + { + "cell_type": "code", + "execution_count": 163, + "metadata": {}, + "outputs": [], + "source": [ + "#import dlib\n", + "from app.processors import face_detector" + ] + }, + { + "cell_type": "code", + "execution_count": 164, + "metadata": {}, + "outputs": [], + "source": [ + "#face_detector = face_detector.DetectorDLIBCNN(gpu=0) # -1 for CPU\n", + "face_detector = face_detector.DetectorCVDNN()" + ] + }, + { + "cell_type": "code", + "execution_count": 165, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "detecting face...\n" + ] + }, + { + "ename": "IndexError", + "evalue": "list index out of range", + "output_type": "error", + "traceback": [ + "\u001b[0;31m--------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0mst\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mbboxes\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mface_detector\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdetect\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mim_resized\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mlargest\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpyramids\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m3\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0mbbox\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbboxes\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_dim\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mim_resized\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
+      "\u001b[0;31mIndexError\u001b[0m: list index out of range"
+     ]
+    }
+   ],
+   "source": [
+    "print('detecting face...')\n",
+    "st = time.time()\n",
+    "bboxes = face_detector.detect(im_resized, largest=True, pyramids=3)\n",
+    "bbox = bboxes[0].to_dim(im_resized.shape[:2][::-1])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 166,
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "AttributeError",
+     "evalue": "'tuple' object has no attribute 'to_xyxy'",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m--------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)",
+      "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mbbox\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbbox\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_xyxy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      2\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbbox\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+      "\u001b[0;31mAttributeError\u001b[0m: 'tuple' object has no attribute 'to_xyxy'"
+     ]
+    }
+   ],
+   "source": [
+    "bbox = bbox.to_xyxy()\n",
+    "print(bbox)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 167,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "points = fa.get_landmarks_from_image(im_resized, [bbox] )"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 168,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/javascript": [
+       [... ~800 lines of matplotlib nbagg interactive-figure JavaScript boilerplate elided (verbatim duplicate of the block above) ...]
+      ],
+      "text/plain": [
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/html": [
+       ""
+      ],
+      "text/plain": [
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "(172.0, 328.0) (129.0, 267.0) (-63.546772, 46.46999)\n"
+     ]
+    }
+   ],
+   "source": [
+    "im = cv.imread(fp_im)\n",
+    "im_rgb = cv.cvtColor(im, cv.COLOR_BGR2RGB)\n",
+    "lm = fa.get_landmarks(im_rgb)[-1]\n",
+    "generate_3d_face_plain(im_rgb, lm)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "```\n",
+    "\"RGB:0/51/153 (hexadecimal: 003399)\" for \"PANTONE REFLEX BLUE\" and \"RGB:255/204/0 (hexadecimal: FFCC00)\" for \"PANTONE YELLOW\" for the web 
palette (the limited 12\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 156, + "metadata": {}, + "outputs": [], + "source": [ + " # line weight\n", + "def generate_3d_face_anim(lm, fp_out, num_frames=30, dpi=72, stroke_weight=0, size=(480,480),\n", + " mark_size=2, mark_type='*', mark_clr=(0,255,0), fps=10, transparent=False):\n", + " '''Generates 3D plot of face landmarks\n", + " '''\n", + " # convert opencv BGR numpy image to RGB\n", + " bg_color = '#%02x%02x%02x' % (0,51,153)\n", + " mark_clr = '#%02x%02x%02x' % (255,204,0)\n", + " \n", + " # scale to make larger\n", + " #lm = np.array([1.2*x,y,z] for x,y,z in list(lm))\n", + " \n", + " # center x,y,z\n", + " xmm = (np.min(lm[:,0]),np.max(lm[:,0]))\n", + " ymm = (np.min(lm[:,1]),np.max(lm[:,1]))\n", + " zmm = (np.min(lm[:,2]),np.max(lm[:,2]))\n", + " \n", + " # make copy of landmarks\n", + " lm_orig = lm.copy()\n", + " xmm = (np.min(lm_orig[:,0]),np.max(lm_orig[:,0]))\n", + " ymm = (np.min(lm_orig[:,1]),np.max(lm_orig[:,1]))\n", + " zmm = (np.min(lm_orig[:,2]),np.max(lm_orig[:,2]))\n", + " \n", + " # swap the y and z components to improve 3d rotation angles for matplotlib\n", + " lm = np.zeros_like(lm_orig).astype(np.uint8)\n", + " for i,p in enumerate(lm_orig):\n", + " x,y,z = p\n", + " lm[i] = np.array([x - xmm[0], z - zmm[0], y - ymm[0]])\n", + " \n", + " # Create plot\n", + " figsize = (size[0]/dpi, size[1]/dpi )\n", + " fig = plt.figure(figsize=figsize, dpi=dpi) # frameon=False\n", + " #fig.set_size_inches(100/100, 1, forward=False)\n", + " fig.tight_layout()\n", + " fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)\n", + " ax = fig.add_subplot(111, projection='3d')\n", + " ax.set_facecolor(bg_color) # background color\n", + " \n", + " xscale, yscale, zscale = (1.2, 1.0, 1.0)\n", + " \n", + " # scatter plot the dots\n", + " # jaw line\n", + " #mark_clr = '#%02x%02x%02x' % (0,255,0) # green\n", + " ax.plot3D(lm[:17,0]*1.2,lm[:17,1], lm[:17,2],\n", + " marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", + " # stage-right eyebrow\n", + " #mark_clr = '#%02x%02x%02x' % (255,0,0) # green\n", + " ax.plot3D(lm[17:22,0]*1.2,lm[17:22,1],lm[17:22,2],\n", + " marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", + " # stage-left eyebrow\n", + " #mark_clr = '#%02x%02x%02x' % (255,255,0) # yellow\n", + " ax.plot3D(lm[22:27,0]*1.2,lm[22:27,1],lm[22:27,2], \n", + " marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", + " # nose ridge\n", + " #mark_clr = '#%02x%02x%02x' % (0,0,255) # blue\n", + " ax.plot3D(lm[27:31,0]*1.2,lm[27:31,1],lm[27:31,2],\n", + " marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", + " # nose-bottom\n", + " #mark_clr = '#%02x%02x%02x' % (255,0,255) # magenta\n", + " ax.plot3D(lm[31:36,0]*1.2,lm[31:36,1],lm[31:36,2],\n", + " marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", + " # stage-left eye\n", + " #mark_clr = '#%02x%02x%02x' % (0,255,255) # cyan\n", + " px, py, pz = lm[36:42,0]*1.2,lm[36:42,1],lm[36:42,2]\n", + " px = np.append(px, lm[36,0]*1.2)\n", + " py = np.append(py, lm[36,1])\n", + " pz = np.append(pz, lm[36,2])\n", + " ax.plot3D(px, py, pz, marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", + " \n", + " # stage-right eye\n", + " #mark_clr = '#%02x%02x%02x' % (255,255,255) # white\n", + " px, py, pz = lm[42:48,0]*1.2,lm[42:48,1],lm[42:48,2]\n", + " px = np.append(px, 
lm[42,0]*1.2)\n", + " py = np.append(py, lm[42,1])\n", + " pz = np.append(pz, lm[42,2])\n", + " ax.plot3D(px, py, pz, marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", + " \n", + " # mouth\n", + " #mark_clr = '#%02x%02x%02x' % (255,125,0) # orange?\n", + " px, py, pz = lm[48:,0]*1.2,lm[48:,1],lm[48:,2]\n", + " px = np.append(px, lm[48,0]*1.2)\n", + " py = np.append(py, lm[48,1])\n", + " pz = np.append(pz, lm[48,2])\n", + " ax.plot3D(px, py, pz, marker=mark_type, markersize=mark_size, color=mark_clr, linewidth=stroke_weight)\n", + " \n", + " rh = '#00ff00' # edge color\n", + " #ax.scatter(lm[:,0]*xscale,lm[:,1]*yscale,lm[:,2]*zscale, c=rh, alpha=1.0, s=35, edgecolor=rh)\n", + " #ax.scatter(lm[:,0]*xscale,lm[:,1]*yscale,lm[:,2]*zscale, c=rh, alpha=1.0, s=1)\n", + " \n", + " # center center x,y,z points\n", + " cx = ((xmm[0] - xmm[1]) // 2) + xmm[1]\n", + " cy = ((ymm[1] - ymm[0]) // 2) + ymm[0]\n", + " cz = ((zmm[1] - zmm[0]) // 2) + zmm[0]\n", + " \n", + " # set initial plot view\n", + " ax.view_init(elev=120., azim=70.)\n", + " \n", + " # remove ticks\n", + " ax.set_xticks([])\n", + " ax.set_yticks([])\n", + " ax.set_zticks([])\n", + " \n", + " # remove axis\n", + " ax.set_frame_on(False)\n", + " ax.set_axis_off()\n", + "\n", + " # rotation increments: from 0 to 360 in num_frames\n", + " phi = np.linspace(np.pi/2, 2*np.pi, num_frames)\n", + "\n", + " def update(phi):\n", + " ax.view_init(180,phi*180./np.pi)\n", + " \n", + " ani = matplotlib.animation.FuncAnimation(fig, update, frames=phi)\n", + " savefig_kwargs = {'pad_inches': 0, 'transparent': transparent}\n", + " ani.save(fp_out, writer='imagemagick', fps=fps, savefig_kwargs=savefig_kwargs)\n", + " clear_output()" + ] + }, + { + "cell_type": "code", + "execution_count": 170, + "metadata": {}, + "outputs": [], + "source": [ + " # line weight\n", + "def generate_3d_face(lm, fp_out, num_frames=30, dpi=72, stroke_weight=0, size=(480,480),\n", + " mark_size=2, mark_type='*', mark_clr=(0,255,0), fps=10, transparent=False):\n", + " '''Generates 3D plot of face landmarks\n", + " '''\n", + " # convert opencv BGR numpy image to RGB\n", + " bg_color = '#%02x%02x%02x' % (0,51,153)\n", + " mark_clr = '#%02x%02x%02x' % (255,204,0)\n", + " \n", + " # scale to make larger\n", + " #lm = np.array([1.2*x,y,z] for x,y,z in list(lm))\n", + " \n", + " # center x,y,z\n", + " xmm = (np.min(lm[:,0]),np.max(lm[:,0]))\n", + " ymm = (np.min(lm[:,1]),np.max(lm[:,1]))\n", + " zmm = (np.min(lm[:,2]),np.max(lm[:,2]))\n", + " \n", + " # make copy of landmarks\n", + " lm_orig = lm.copy()\n", + " xmm = (np.min(lm_orig[:,0]),np.max(lm_orig[:,0]))\n", + " ymm = (np.min(lm_orig[:,1]),np.max(lm_orig[:,1]))\n", + " zmm = (np.min(lm_orig[:,2]),np.max(lm_orig[:,2]))\n", + " \n", + " # swap the y and z components to improve 3d rotation angles for matplotlib\n", + " lm = np.zeros_like(lm_orig).astype(np.uint8)\n", + " for i,p in enumerate(lm_orig):\n", + " x,y,z = p\n", + " lm[i] = np.array([x - xmm[0], z - zmm[0], y - ymm[0]])\n", + " \n", + " # Create plot\n", + " \n", + " figsize = (size[0]/dpi, size[1]/dpi )\n", + " fig = plt.figure(figsize=figsize, dpi=dpi) # frameon=False\n", + " #fig.set_size_inches(100/100, 1, forward=False)\n", + " fig.tight_layout()\n", + " fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)\n", + " ax = fig.add_subplot(111, projection='3d')\n", + " ax.set_facecolor(bg_color) # background color\n", + " \n", + " xscale, yscale, zscale = (1.2, 1.0, 1.0)\n", + " \n", + " # scatter plot the 
dots\n", + " # jaw line\n", + " #mark_clr = '#%02x%02x%02x' % (0,255,0) # green\n", + " ax.plot3D(lm[:17,0]*1.2,lm[:17,1], lm[:17,2],\n", + " marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", + " # stage-right eyebrow\n", + " #mark_clr = '#%02x%02x%02x' % (255,0,0) # green\n", + " ax.plot3D(lm[17:22,0]*1.2,lm[17:22,1],lm[17:22,2],\n", + " marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", + " # stage-left eyebrow\n", + " #mark_clr = '#%02x%02x%02x' % (255,255,0) # yellow\n", + " ax.plot3D(lm[22:27,0]*1.2,lm[22:27,1],lm[22:27,2], \n", + " marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", + " # nose ridge\n", + " #mark_clr = '#%02x%02x%02x' % (0,0,255) # blue\n", + " ax.plot3D(lm[27:31,0]*1.2,lm[27:31,1],lm[27:31,2],\n", + " marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", + " # nose-bottom\n", + " #mark_clr = '#%02x%02x%02x' % (255,0,255) # magenta\n", + " ax.plot3D(lm[31:36,0]*1.2,lm[31:36,1],lm[31:36,2],\n", + " marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", + " # stage-left eye\n", + " #mark_clr = '#%02x%02x%02x' % (0,255,255) # cyan\n", + " px, py, pz = lm[36:42,0]*1.2,lm[36:42,1],lm[36:42,2]\n", + " px = np.append(px, lm[36,0]*1.2)\n", + " py = np.append(py, lm[36,1])\n", + " pz = np.append(pz, lm[36,2])\n", + " ax.plot3D(px, py, pz, marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", + " \n", + " # stage-right eye\n", + " #mark_clr = '#%02x%02x%02x' % (255,255,255) # white\n", + " px, py, pz = lm[42:48,0]*1.2,lm[42:48,1],lm[42:48,2]\n", + " px = np.append(px, lm[42,0]*1.2)\n", + " py = np.append(py, lm[42,1])\n", + " pz = np.append(pz, lm[42,2])\n", + " ax.plot3D(px, py, pz, marker=mark_type, markersize=mark_size, color=mark_clr,linewidth=stroke_weight)\n", + " \n", + " # mouth\n", + " #mark_clr = '#%02x%02x%02x' % (255,125,0) # orange?\n", + " px, py, pz = lm[48:,0]*1.2,lm[48:,1],lm[48:,2]\n", + " px = np.append(px, lm[48,0]*1.2)\n", + " py = np.append(py, lm[48,1])\n", + " pz = np.append(pz, lm[48,2])\n", + " ax.plot3D(px, py, pz, marker=mark_type, markersize=mark_size, color=mark_clr, linewidth=stroke_weight)\n", + " \n", + " rh = '#%02x%02x%02x' % (255,224,10) # edge color\n", + " ax.scatter(lm[:,0]*xscale,lm[:,1]*yscale,lm[:,2]*zscale, c=rh, alpha=1.0, s=35, edgecolor=rh)\n", + " ax.scatter(lm[:,0]*xscale,lm[:,1]*yscale,lm[:,2]*zscale, c=rh, alpha=1.0, s=1)\n", + " \n", + " # center center x,y,z points\n", + " cx = ((xmm[0] - xmm[1]) // 2) + xmm[1]\n", + " cy = ((ymm[1] - ymm[0]) // 2) + ymm[0]\n", + " cz = ((zmm[1] - zmm[0]) // 2) + zmm[0]\n", + " \n", + " # set initial plot view\n", + " ax.view_init(elev=180., azim=90.)\n", + " \n", + " # remove ticks\n", + " ax.set_xticks([])\n", + " ax.set_yticks([])\n", + " ax.set_zticks([])\n", + " \n", + " # remove axis\n", + " ax.set_frame_on(False)\n", + " ax.set_axis_off()\n", + "\n", + " # rotation increments: from 0 to 360 in num_frames\n", + " phi = np.linspace(np.pi/2, 2*np.pi, num_frames)\n", + "\n", + " plt.savefig(fp_out)\n", + " fig.show()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 172, + "metadata": {}, + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if 