path: root/megapixels/commands/processor
author    adamhrv <adam@ahprojects.com>  2019-01-18 11:00:18 +0100
committer adamhrv <adam@ahprojects.com>  2019-01-18 11:00:18 +0100
commit    e06af50389f849be0bfe4fa97d39f4519ef2c711 (patch)
tree      49755b51e1b8b1f8031e5483333570a8e9951272 /megapixels/commands/processor
parent    03ad11fb2a3dcd425d50167b15d72d4e0ef536a2 (diff)
change to cli_proc
Diffstat (limited to 'megapixels/commands/processor')
-rw-r--r--  megapixels/commands/processor/_old_files_to_face_rois.py  168
-rw-r--r--  megapixels/commands/processor/cluster.py                    47
-rw-r--r--  megapixels/commands/processor/crop.py                      104
-rw-r--r--  megapixels/commands/processor/csv_to_faces.py              105
-rw-r--r--  megapixels/commands/processor/csv_to_faces_mt.py           105
-rw-r--r--  megapixels/commands/processor/face_3ddfa.py                331
-rw-r--r--  megapixels/commands/processor/face_attributes.py           136
-rw-r--r--  megapixels/commands/processor/face_frames.py                82
-rw-r--r--  megapixels/commands/processor/face_landmark_2d_5.py        146
-rw-r--r--  megapixels/commands/processor/face_landmark_2d_68.py       150
-rw-r--r--  megapixels/commands/processor/face_landmark_3d_68.py       147
-rw-r--r--  megapixels/commands/processor/face_pose.py                 164
-rw-r--r--  megapixels/commands/processor/face_roi.py                  187
-rw-r--r--  megapixels/commands/processor/face_vector.py               133
-rw-r--r--  megapixels/commands/processor/mirror.py                     57
-rw-r--r--  megapixels/commands/processor/resize.py                    150
-rw-r--r--  megapixels/commands/processor/resize_dataset.py            149
-rw-r--r--  megapixels/commands/processor/videos_to_frames.py           73
18 files changed, 2434 insertions, 0 deletions
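
Each file in this commit exposes a single click command named cli, and the commit message references cli_proc; this points at a click group that collects the processor commands. A minimal sketch of how such a loader could be wired, assuming a hypothetical cli_proc.py entry point (the loader and group names are illustrative, not confirmed by this diff):

    import importlib
    import pkgutil

    import click

    import megapixels.commands.processor as processor_pkg

    @click.group()
    def cli():
      """Processor commands"""

    # register each module's `cli` command under the group, named after its module
    for mod_info in pkgutil.iter_modules(processor_pkg.__path__):
      if mod_info.name.startswith('_'):
        continue  # skip legacy/private modules such as _old_files_to_face_rois
      module = importlib.import_module('{}.{}'.format(processor_pkg.__name__, mod_info.name))
      if hasattr(module, 'cli'):
        cli.add_command(module.cli, name=mod_info.name)

    if __name__ == '__main__':
      cli()
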
diff --git a/megapixels/commands/processor/_old_files_to_face_rois.py b/megapixels/commands/processor/_old_files_to_face_rois.py
new file mode 100644
index 00000000..d92cbd74
--- /dev/null
+++ b/megapixels/commands/processor/_old_files_to_face_rois.py
@@ -0,0 +1,168 @@
+"""
+Crop images to prepare for training
+"""
+
+import click
+# from PIL import Image, ImageOps, ImageFilter, ImageDraw
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+color_filters = {'color': 1, 'gray': 2, 'all': 3}
+
+@click.command()
+@click.option('-i', '--input', 'opt_dirs_in', required=True, multiple=True,
+  help='Input directory (repeatable for multiple directories)')
+@click.option('-o', '--output', 'opt_fp_out', required=True,
+ help='Output CSV')
+@click.option('-e', '--ext', 'opt_ext',
+ default='jpg', type=click.Choice(['jpg', 'png']),
+ help='File glob ext')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('-t', '--detector-type', 'opt_detector_type',
+ type=cfg.FaceDetectNetVar,
+ default=click_utils.get_default(types.FaceDetectNet.DLIB_CNN),
+ help=click_utils.show_help(types.FaceDetectNet))
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+ help='GPU index')
+@click.option('--conf', 'opt_conf_thresh', default=0.85, type=click.FloatRange(0,1),
+ help='Confidence minimum threshold')
+@click.option('-p', '--pyramids', 'opt_pyramids', default=0, type=click.IntRange(0,4),
+ help='Number pyramids to upscale for DLIB detectors')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+ help='Display detections to debug')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+ help='Use glob recursion (slower)')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('--color', 'opt_color_filter',
+ type=click.Choice(color_filters.keys()), default='color',
+  help='Filter to keep color or grayscale images (color = keep color)')
+@click.pass_context
+def cli(ctx, opt_dirs_in, opt_fp_out, opt_ext, opt_size, opt_detector_type,
+ opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_recursive, opt_force, opt_color_filter):
+ """Converts frames with faces to CSV of ROIs"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import dlib # must keep a local reference for dlib
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.processors import face_detector
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ if not opt_force and Path(opt_fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ if opt_detector_type == types.FaceDetectNet.CVDNN:
+ detector = face_detector.DetectorCVDNN()
+ elif opt_detector_type == types.FaceDetectNet.DLIB_CNN:
+ detector = face_detector.DetectorDLIBCNN(opt_gpu)
+ elif opt_detector_type == types.FaceDetectNet.DLIB_HOG:
+ detector = face_detector.DetectorDLIBHOG()
+ elif opt_detector_type == types.FaceDetectNet.MTCNN:
+ detector = face_detector.DetectorMTCNN()
+ elif opt_detector_type == types.FaceDetectNet.HAAR:
+ log.error('{} not yet implemented'.format(opt_detector_type.name))
+ return
+
+
+ # -------------------------------------------------
+ # process here
+ color_filter = color_filters[opt_color_filter]
+
+ # get list of files to process
+  # pair each file with its source dir so relative subdirs resolve correctly
+  fp_ims = []
+  for opt_dir_in in opt_dirs_in:
+    if opt_recursive:
+      fp_glob = join(opt_dir_in, '**/*.{}'.format(opt_ext))
+      fp_ims += [(fp, opt_dir_in) for fp in glob(fp_glob, recursive=True)]
+    else:
+      fp_glob = join(opt_dir_in, '*.{}'.format(opt_ext))
+      fp_ims += [(fp, opt_dir_in) for fp in glob(fp_glob)]
+    log.debug(fp_glob)
+
+
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ log.debug('processing {:,} files'.format(len(fp_ims)))
+
+
+ data = []
+
+  for fp_im, dir_in in tqdm(fp_ims):
+    im = cv.imread(fp_im)
+
+    # filter out color or grayscale images
+    if color_filter != color_filters['all']:
+      try:
+        is_gray = im_utils.is_grayscale(im)
+        if is_gray and color_filter == color_filters['color']:
+          log.debug('Skipping grayscale image: {}'.format(fp_im))
+          continue
+        elif not is_gray and color_filter == color_filters['gray']:
+          log.debug('Skipping color image: {}'.format(fp_im))
+          continue
+      except Exception as e:
+        log.error('Could not check grayscale: {}'.format(fp_im))
+        continue
+
+    try:
+      bboxes = detector.detect(im, opt_size=opt_size, opt_pyramids=opt_pyramids)
+    except Exception as e:
+      log.error('could not detect: {}'.format(fp_im))
+      log.error('{}'.format(e))
+      continue
+
+    fpp_im = Path(fp_im)
+    subdir = str(fpp_im.parent.relative_to(dir_in))
+
+ for bbox in bboxes:
+ # log.debug('is square: {}'.format(bbox.w == bbox.h))
+ nw,nh = int(bbox.w * im.shape[1]), int(bbox.h * im.shape[0])
+ roi = {
+ 'fn': fpp_im.stem,
+ 'ext': fpp_im.suffix.replace('.',''),
+ 'x': bbox.x,
+ 'y': bbox.y,
+ 'w': bbox.w,
+ 'h': bbox.h,
+ 'image_height': im.shape[0],
+ 'image_width': im.shape[1],
+ 'subdir': subdir}
+ bbox_dim = bbox.to_dim(im.shape[:2][::-1]) # w,h
+ data.append(roi)
+
+ # debug display
+ if opt_display and len(bboxes):
+ im_md = im_utils.resize(im, width=min(1200, opt_size[0]))
+ for bbox in bboxes:
+ bbox_dim = bbox.to_dim(im_md.shape[:2][::-1])
+ cv.rectangle(im_md, bbox_dim.pt_tl, bbox_dim.pt_br, (0,255,0), 3)
+ cv.imshow('', im_md)
+ while True:
+ k = cv.waitKey(1) & 0xFF
+        if k == 27 or k == ord('q'):  # ESC or q quits
+ cv.destroyAllWindows()
+ sys.exit()
+ elif k != 255:
+ # any key to continue
+ break
+
+  # save data
+ file_utils.mkdirs(opt_fp_out)
+ df = pd.DataFrame.from_dict(data)
+  df.to_csv(opt_fp_out, index=False)
\ No newline at end of file
diff --git a/megapixels/commands/processor/cluster.py b/megapixels/commands/processor/cluster.py
new file mode 100644
index 00000000..419091a0
--- /dev/null
+++ b/megapixels/commands/processor/cluster.py
@@ -0,0 +1,47 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+@click.command()
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--metadata', 'opt_metadata', required=True,
+ type=cfg.MetadataVar,
+ show_default=True,
+ help=click_utils.show_help(types.Metadata))
+@click.pass_context
+def cli(ctx, opt_data_store, opt_dataset, opt_metadata):
+  """Cluster face embeddings with DBSCAN"""
+
+  import sys
+
+  import numpy as np
+  import cv2 as cv
+  from sklearn.cluster import DBSCAN
+
+  # TODO: load the face embeddings (and preview image paths) from the data
+  # store; placeholders below until the data loading is implemented
+  encodings = []  # 128-D face vectors, one per detected face
+  fp_im = None  # filepath of an image to preview
+
+  # cluster the embeddings
+  print("[INFO] clustering...")
+  clt = DBSCAN(metric="euclidean", n_jobs=-1)  # n_jobs=-1: use all CPU cores
+  clt.fit(encodings)
+
+  # determine the total number of unique faces found in the dataset
+  labelIDs = np.unique(clt.labels_)
+  numUniqueFaces = len(np.where(labelIDs > -1)[0])
+  print("[INFO] # unique faces: {}".format(numUniqueFaces))
+
+  # load and display image
+  im = cv.imread(fp_im)
+  cv.imshow('', im)
+
+ while True:
+ k = cv.waitKey(1) & 0xFF
+    if k == 27 or k == ord('q'):  # ESC or q quits
+ cv.destroyAllWindows()
+ sys.exit()
+ elif k != 255:
+ # any key to continue
+      break
\ No newline at end of file
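
cluster.py stops short of loading its inputs (encodings and fp_im are placeholders). A self-contained sketch of the intended flow, assuming, hypothetically, that the embeddings are stored one row per face with a space-separated vec column, mirroring how the landmark commands below serialize their point vectors:

    import numpy as np
    import pandas as pd
    from sklearn.cluster import DBSCAN

    # hypothetical input: one row per face, 'vec' = space-separated 128-D embedding
    df = pd.read_csv('face_vector.csv')
    encodings = np.array([np.array(v.split(), dtype=float) for v in df['vec']])

    # density-based clustering: no need to pick the number of identities up front
    clt = DBSCAN(metric='euclidean', eps=0.5, min_samples=3, n_jobs=-1)
    clt.fit(encodings)

    # label -1 marks noise; every other label is one putative identity
    label_ids = np.unique(clt.labels_)
    num_unique_faces = len(np.where(label_ids > -1)[0])
    print('# unique faces: {}'.format(num_unique_faces))

    for label in label_ids[label_ids > -1]:
      idxs = np.where(clt.labels_ == label)[0]
      print('cluster {}: {} faces'.format(label, len(idxs)))
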
diff --git a/megapixels/commands/processor/crop.py b/megapixels/commands/processor/crop.py
new file mode 100644
index 00000000..778be0c4
--- /dev/null
+++ b/megapixels/commands/processor/crop.py
@@ -0,0 +1,104 @@
+"""
+Crop images to prepare for training
+"""
+
+import click
+from PIL import Image, ImageOps, ImageFilter, ImageDraw
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_dir_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+ help='Output directory')
+@click.option('-e', '--ext', 'opt_ext',
+ default='jpg', type=click.Choice(['jpg', 'png']),
+ help='File glob ext')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(256, 256),
+ help='Output image size')
+@click.option('-t', '--crop-type', 'opt_crop_type',
+ default='center', type=click.Choice(['center', 'mirror', 'face', 'person', 'none']),
+ help='Force fit image center location')
+@click.pass_context
+def cli(ctx, opt_dir_in, opt_dir_out, opt_ext, opt_size, opt_crop_type):
+ """Crop, mirror images"""
+
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+ from tqdm import tqdm
+
+
+ from app.utils import logger_utils, file_utils, im_utils
+
+ # -------------------------------------------------
+ # process here
+
+ log = logger_utils.Logger.getLogger()
+ log.info('crop images')
+
+ # get list of files to process
+ fp_ims = glob(join(opt_dir_in, '*.{}'.format(opt_ext)))
+ log.debug('files: {}'.format(len(fp_ims)))
+
+ # ensure output dir exists
+ file_utils.mkdirs(opt_dir_out)
+
+ for fp_im in tqdm(fp_ims):
+ im = process_crop(fp_im, opt_size, opt_crop_type)
+ fp_out = join(opt_dir_out, Path(fp_im).name)
+ im.save(fp_out)
+
+
+def process_crop(fp_im, opt_size, crop_type):
+ im = Image.open(fp_im)
+ if crop_type == 'center':
+ im = crop_square_fit(im, opt_size)
+ elif crop_type == 'mirror':
+ im = mirror_crop_square(im, opt_size)
+ return im
+
+def crop_square_fit(im, size, center=(0.5, 0.5)):
+ return ImageOps.fit(im, size, method=Image.BICUBIC, centering=center)
+
+def mirror_crop_square(im, size):
+ # force to even dims
+ if im.size[0] % 2 or im.size[1] % 2:
+ im = ImageOps.fit(im, ((im.size[0] // 2) * 2, (im.size[1] // 2) * 2))
+
+ # create new square image
+ min_size, max_size = (min(im.size), max(im.size))
+ orig_w, orig_h = im.size
+ margin = (max_size - min_size) // 2
+ w, h = (max_size, max_size)
+ im_new = Image.new('RGB', (w, h), color=(0, 0, 0))
+
+  # PIL crop boxes are (left, top, right, bottom)
+  if orig_w > orig_h:
+    # landscape: mirror-expand top and bottom
+    im_top = ImageOps.flip(im.crop((0, 0, orig_w, margin)))
+    im_bot = ImageOps.flip(im.crop((0, orig_h - margin, orig_w, orig_h)))
+    im_new.paste(im_top, (0, 0))
+    im_new.paste(im, (0, margin))
+    im_new.paste(im_bot, (0, margin + orig_h))
+  elif orig_h > orig_w:
+    # portrait: mirror-expand left and right
+    im_left = ImageOps.mirror(im.crop((0, 0, margin, orig_h)))
+    im_right = ImageOps.mirror(im.crop((orig_w - margin, 0, orig_w, orig_h)))
+    im_new.paste(im_left, (0, 0))
+    im_new.paste(im, (margin, 0))
+    im_new.paste(im_right, (margin + orig_w, 0))
+  else:
+    # already square
+    im_new.paste(im, (0, 0))
+
+ return im_new.resize(size)
+
+
+def center_crop_face():
+ pass
+
+def center_crop_person():
+  pass
\ No newline at end of file
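
For reference, mirror_crop_square (as defined above) pads the short axis with mirrored bands instead of cropping, so no pixels are discarded before the final resize:

    from PIL import Image

    # a 640x480 landscape image becomes a 640x640 square with two mirrored
    # 80px bands (top and bottom), then is resized to the requested size
    im = Image.new('RGB', (640, 480), color=(128, 128, 128))
    im_sq = mirror_crop_square(im, (256, 256))
    assert im_sq.size == (256, 256)
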
diff --git a/megapixels/commands/processor/csv_to_faces.py b/megapixels/commands/processor/csv_to_faces.py
new file mode 100644
index 00000000..64c8b965
--- /dev/null
+++ b/megapixels/commands/processor/csv_to_faces.py
@@ -0,0 +1,105 @@
+"""
+Reads in CSV of ROIs and extracts facial regions with padding
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input CSV')
+@click.option('-m', '--media', 'opt_dir_media', required=True,
+ help='Input image/video directory')
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+ help='Output directory for extracted ROI images')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('--padding', 'opt_padding', default=0.25,
+ help='Facial padding as percentage of face width')
+@click.option('--ext', 'opt_ext_out', default='png', type=click.Choice(['jpg', 'png']),
+ help='Output image type')
+@click.option('--min', 'opt_min', default=(60, 60),
+ help='Minimum original face size')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_dir_media, opt_dir_out, opt_slice,
+ opt_padding, opt_ext_out, opt_min):
+ """Converts ROIs to images"""
+
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ from PIL import Image, ImageOps, ImageFilter, ImageDraw
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.models.bbox import BBox
+
+ # -------------------------------------------------
+ # process here
+ log = logger_utils.Logger.getLogger()
+
+ df_rois = pd.read_csv(opt_fp_in, dtype={'subdir': str, 'fn': str})
+ if opt_slice:
+ df_rois = df_rois[opt_slice[0]:opt_slice[1]]
+
+ log.info('Processing {:,} rows'.format(len(df_rois)))
+
+ file_utils.mkdirs(opt_dir_out)
+
+ df_rois_grouped = df_rois.groupby(['fn']) # group by fn/filename
+ groups = df_rois_grouped.groups
+ skipped = []
+
+ for group in tqdm(groups):
+ # get image
+ group_rows = df_rois_grouped.get_group(group)
+
+ row = group_rows.iloc[0]
+ fp_im = join(opt_dir_media, str(row['subdir']), '{fn}.{ext}'.format(**row)) # TODO change to ext
+ try:
+      im = Image.open(fp_im)
+      im.verify()  # verify() must be called before any pixel data is accessed
+      im = Image.open(fp_im).convert('RGB')  # reopen: verify() invalidates the image
+ except Exception as e:
+ log.warn('Could not open: {}'.format(fp_im))
+ log.error(e)
+ continue
+
+ for idx, roi in group_rows.iterrows():
+ # get bbox to im dimensions
+ xywh = [roi['x'], roi['y'], roi['w'] , roi['h']]
+ bbox = BBox.from_xywh(*xywh)
+ dim = im.size
+ bbox_dim = bbox.to_dim(dim)
+ # expand
+ opt_padding_px = int(opt_padding * bbox_dim.width)
+ bbox_dim_exp = bbox_dim.expand_dim(opt_padding_px, dim)
+ # crop
+ x1y2 = bbox_dim_exp.pt_tl + bbox_dim_exp.pt_br
+ im_crop = im.crop(box=x1y2)
+
+ # strip exif, create new image and paste data
+ im_crop_data = list(im_crop.getdata())
+ im_crop_no_exif = Image.new(im_crop.mode, im_crop.size)
+ im_crop_no_exif.putdata(im_crop_data)
+
+ # save
+ idx_zpad = file_utils.zpad(idx, zeros=3)
+ subdir = '' if roi['subdir'] == '.' else '{}_'.format(roi['subdir'])
+ subdir = subdir.replace('/', '_')
+ fp_im_out = join(opt_dir_out, '{}{}{}.{}'.format(subdir, roi['fn'], idx_zpad, opt_ext_out))
+ # threshold size and save
+ if im_crop_no_exif.size[0] < opt_min[0] or im_crop_no_exif.size[1] < opt_min[1]:
+ skipped.append(fp_im_out)
+ log.info('Face too small: {}, idx: {}'.format(fp_im, idx))
+ else:
+ im_crop_no_exif.save(fp_im_out)
+
+ log.info('Skipped {:,} images'.format(len(skipped)))
diff --git a/megapixels/commands/processor/csv_to_faces_mt.py b/megapixels/commands/processor/csv_to_faces_mt.py
new file mode 100644
index 00000000..64c8b965
--- /dev/null
+++ b/megapixels/commands/processor/csv_to_faces_mt.py
@@ -0,0 +1,105 @@
+"""
+Reads in CSV of ROIs and extracts facial regions with padding
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input CSV')
+@click.option('-m', '--media', 'opt_dir_media', required=True,
+ help='Input image/video directory')
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+ help='Output directory for extracted ROI images')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('--padding', 'opt_padding', default=0.25,
+ help='Facial padding as percentage of face width')
+@click.option('--ext', 'opt_ext_out', default='png', type=click.Choice(['jpg', 'png']),
+ help='Output image type')
+@click.option('--min', 'opt_min', default=(60, 60),
+ help='Minimum original face size')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_dir_media, opt_dir_out, opt_slice,
+ opt_padding, opt_ext_out, opt_min):
+ """Converts ROIs to images"""
+
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ from PIL import Image, ImageOps, ImageFilter, ImageDraw
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.models.bbox import BBox
+
+ # -------------------------------------------------
+ # process here
+ log = logger_utils.Logger.getLogger()
+
+ df_rois = pd.read_csv(opt_fp_in, dtype={'subdir': str, 'fn': str})
+ if opt_slice:
+ df_rois = df_rois[opt_slice[0]:opt_slice[1]]
+
+ log.info('Processing {:,} rows'.format(len(df_rois)))
+
+ file_utils.mkdirs(opt_dir_out)
+
+ df_rois_grouped = df_rois.groupby(['fn']) # group by fn/filename
+ groups = df_rois_grouped.groups
+ skipped = []
+
+ for group in tqdm(groups):
+ # get image
+ group_rows = df_rois_grouped.get_group(group)
+
+ row = group_rows.iloc[0]
+ fp_im = join(opt_dir_media, str(row['subdir']), '{fn}.{ext}'.format(**row)) # TODO change to ext
+ try:
+      im = Image.open(fp_im)
+      im.verify()  # verify() must be called before any pixel data is accessed
+      im = Image.open(fp_im).convert('RGB')  # reopen: verify() invalidates the image
+ except Exception as e:
+ log.warn('Could not open: {}'.format(fp_im))
+ log.error(e)
+ continue
+
+ for idx, roi in group_rows.iterrows():
+ # get bbox to im dimensions
+ xywh = [roi['x'], roi['y'], roi['w'] , roi['h']]
+ bbox = BBox.from_xywh(*xywh)
+ dim = im.size
+ bbox_dim = bbox.to_dim(dim)
+ # expand
+ opt_padding_px = int(opt_padding * bbox_dim.width)
+ bbox_dim_exp = bbox_dim.expand_dim(opt_padding_px, dim)
+ # crop
+ x1y2 = bbox_dim_exp.pt_tl + bbox_dim_exp.pt_br
+ im_crop = im.crop(box=x1y2)
+
+ # strip exif, create new image and paste data
+ im_crop_data = list(im_crop.getdata())
+ im_crop_no_exif = Image.new(im_crop.mode, im_crop.size)
+ im_crop_no_exif.putdata(im_crop_data)
+
+ # save
+ idx_zpad = file_utils.zpad(idx, zeros=3)
+ subdir = '' if roi['subdir'] == '.' else '{}_'.format(roi['subdir'])
+ subdir = subdir.replace('/', '_')
+ fp_im_out = join(opt_dir_out, '{}{}{}.{}'.format(subdir, roi['fn'], idx_zpad, opt_ext_out))
+ # threshold size and save
+ if im_crop_no_exif.size[0] < opt_min[0] or im_crop_no_exif.size[1] < opt_min[1]:
+ skipped.append(fp_im_out)
+ log.info('Face too small: {}, idx: {}'.format(fp_im, idx))
+ else:
+ im_crop_no_exif.save(fp_im_out)
+
+ log.info('Skipped {:,} images'.format(len(skipped)))
diff --git a/megapixels/commands/processor/face_3ddfa.py b/megapixels/commands/processor/face_3ddfa.py
new file mode 100644
index 00000000..ffc74180
--- /dev/null
+++ b/megapixels/commands/processor/face_3ddfa.py
@@ -0,0 +1,331 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None, required=True,
+ help='Image filepath')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='GIF output path')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+ help='GPU index')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+ help='Display detections to debug')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
+  """Face detection and age estimation demo"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ from tqdm import tqdm
+ import numpy as np
+ import pandas as pd
+ import cv2 as cv
+ import dlib
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.utils import plot_utils
+ from app.processors import face_detector, face_age
+ from app.models.data_store import DataStore
+
+ # 3DDFA
+ # git clone https://github.com/cleardusk/3DDFA/ 3rdparty/
+
+ import torch
+ import torchvision.transforms as transforms
+ import mobilenet_v1
+ from utils.ddfa import ToTensorGjz, NormalizeGjz, str2bool
+ import scipy.io as sio
+ from utils.inference import get_suffix, parse_roi_box_from_landmark, crop_img, predict_68pts, dump_to_ply, dump_vertex, \
+ draw_landmarks, predict_dense, parse_roi_box_from_bbox, get_colors, write_obj_with_colors
+ from utils.cv_plot import plot_pose_box
+ from utils.estimate_pose import parse_pose
+ from utils.render import get_depths_image, cget_depths_image, cpncc
+ from utils.paf import gen_img_paf
+ import argparse
+ import torch.backends.cudnn as cudnn
+
+
+ log = logger_utils.Logger.getLogger()
+
+
+ # -------------------------------------------------
+ # load image
+
+ im = cv.imread(opt_fp_in)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+
+ # ----------------------------------------------------------------------------
+ # detect face
+
+ face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+  bboxes = face_detector.detect(im_resized, largest=True)
+  if not bboxes:
+    log.error('no face detected')
+    return
+  bbox = bboxes[0]
+  dim = im_resized.shape[:2][::-1]
+  bbox_dim = bbox.to_dim(dim)
+  log.info(f'face detected: {bbox_dim.to_xyxy()}')
+
+
+ # ----------------------------------------------------------------------------
+ # age
+
+ age_apparent_predictor = face_age.FaceAgeApparent()
+ age_real_predictor = face_age.FaceAgeReal()
+
+ st = time.time()
+ age_real = age_real_predictor.age(im_resized, bbox_dim)
+  log.info(f'age real took: {time.time()-st:.5f}s')
+ st = time.time()
+ age_apparent = age_apparent_predictor.age(im_resized, bbox_dim)
+  log.info(f'age apparent took: {time.time()-st:.5f}s')
+
+
+ # ----------------------------------------------------------------------------
+ # output
+
+  log.info(f'Face coords: {bbox_dim}')
+ log.info(f'Age (real): {(age_real):.2f}')
+ log.info(f'Age (apparent): {(age_apparent):.2f}')
+
+
+ # ----------------------------------------------------------------------------
+ # draw
+
+ # draw real age
+ im_age_real = im_resized.copy()
+ draw_utils.draw_bbox(im_age_real, bbox_dim)
+ txt = f'{(age_real):.2f}'
+ draw_utils.draw_text(im_age_real, bbox_dim.pt_tl, txt)
+
+ # apparent
+ im_age_apparent = im_resized.copy()
+ draw_utils.draw_bbox(im_age_apparent, bbox_dim)
+ txt = f'{(age_apparent):.2f}'
+ draw_utils.draw_text(im_age_apparent, bbox_dim.pt_tl, txt)
+
+
+ # ----------------------------------------------------------------------------
+ # save
+
+ if opt_fp_out:
+ # save pose only
+ fpp_out = Path(opt_fp_out)
+
+ fp_out = join(fpp_out.parent, f'{fpp_out.stem}_real{fpp_out.suffix}')
+ cv.imwrite(fp_out, im_age_real)
+
+ fp_out = join(fpp_out.parent, f'{fpp_out.stem}_apparent{fpp_out.suffix}')
+ cv.imwrite(fp_out, im_age_apparent)
+
+
+ # ----------------------------------------------------------------------------
+ # display
+
+ if opt_display:
+ # show all images here
+ cv.imshow('real', im_age_real)
+ cv.imshow('apparent', im_age_apparent)
+ display_utils.handle_keyboard()
+
+
+
+
+
+STD_SIZE = 120
+
+
+def main(args):
+    # 1. load pre-trained model
+ checkpoint_fp = 'models/phase1_wpdc_vdc_v2.pth.tar'
+ arch = 'mobilenet_1'
+
+ checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
+ model = getattr(mobilenet_v1, arch)(num_classes=62) # 62 = 12(pose) + 40(shape) +10(expression)
+ model_dict = model.state_dict()
+ # because the model is trained by multiple gpus, prefix module should be removed
+ for k in checkpoint.keys():
+ model_dict[k.replace('module.', '')] = checkpoint[k]
+ model.load_state_dict(model_dict, strict=False)
+ if args.mode == 'gpu':
+ cudnn.benchmark = True
+ model = model.cuda()
+ model.eval()
+
+ # 2. load dlib model for face detection and landmark used for face cropping
+ if args.dlib_landmark:
+ dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat'
+ face_regressor = dlib.shape_predictor(dlib_landmark_model)
+ if args.dlib_bbox:
+ face_detector = dlib.get_frontal_face_detector()
+
+ # 3. forward
+ tri = sio.loadmat('visualize/tri.mat')['tri']
+ transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
+ for img_fp in args.files:
+ img_ori = cv2.imread(img_fp)
+ if args.dlib_bbox:
+ rects = face_detector(img_ori, 1)
+ else:
+ rects = []
+
+ if len(rects) == 0:
+ rects = dlib.rectangles()
+ rect_fp = img_fp + '.bbox'
+ lines = open(rect_fp).read().strip().split('\n')[1:]
+ for l in lines:
+ l, r, t, b = [int(_) for _ in l.split(' ')[1:]]
+ rect = dlib.rectangle(l, r, t, b)
+ rects.append(rect)
+
+ pts_res = []
+ Ps = [] # Camera matrix collection
+ poses = [] # pose collection, [todo: validate it]
+ vertices_lst = [] # store multiple face vertices
+ ind = 0
+ suffix = get_suffix(img_fp)
+ for rect in rects:
+ # whether use dlib landmark to crop image, if not, use only face bbox to calc roi bbox for cropping
+ if args.dlib_landmark:
+ # - use landmark for cropping
+ pts = face_regressor(img_ori, rect).parts()
+ pts = np.array([[pt.x, pt.y] for pt in pts]).T
+ roi_box = parse_roi_box_from_landmark(pts)
+ else:
+ # - use detected face bbox
+ bbox = [rect.left(), rect.top(), rect.right(), rect.bottom()]
+ roi_box = parse_roi_box_from_bbox(bbox)
+
+ img = crop_img(img_ori, roi_box)
+
+ # forward: one step
+ img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
+ input = transform(img).unsqueeze(0)
+ with torch.no_grad():
+ if args.mode == 'gpu':
+ input = input.cuda()
+ param = model(input)
+ param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
+
+ # 68 pts
+ pts68 = predict_68pts(param, roi_box)
+
+ # two-step for more accurate bbox to crop face
+ if args.bbox_init == 'two':
+ roi_box = parse_roi_box_from_landmark(pts68)
+ img_step2 = crop_img(img_ori, roi_box)
+ img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
+ input = transform(img_step2).unsqueeze(0)
+ with torch.no_grad():
+ if args.mode == 'gpu':
+ input = input.cuda()
+ param = model(input)
+ param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
+
+ pts68 = predict_68pts(param, roi_box)
+
+ pts_res.append(pts68)
+ P, pose = parse_pose(param)
+ Ps.append(P)
+ poses.append(pose)
+
+ # dense face 3d vertices
+ if args.dump_ply or args.dump_vertex or args.dump_depth or args.dump_pncc or args.dump_obj:
+ vertices = predict_dense(param, roi_box)
+ vertices_lst.append(vertices)
+ if args.dump_ply:
+ dump_to_ply(vertices, tri, '{}_{}.ply'.format(img_fp.replace(suffix, ''), ind))
+ if args.dump_vertex:
+ dump_vertex(vertices, '{}_{}.mat'.format(img_fp.replace(suffix, ''), ind))
+
+ # save .mat for 3d Face
+ wfp = '{}_{}_face3d.mat'.format(img_fp.replace(suffix, ''), ind)
+ colors = get_colors(img_ori, vertices)
+ sio.savemat(wfp, {'vertices': vertices, 'colors': colors, 'triangles': tri})
+
+ if args.dump_pts:
+ wfp = '{}_{}.txt'.format(img_fp.replace(suffix, ''), ind)
+ np.savetxt(wfp, pts68, fmt='%.3f')
+ print('Save 68 3d landmarks to {}'.format(wfp))
+ if args.dump_roi_box:
+ wfp = '{}_{}.roibox'.format(img_fp.replace(suffix, ''), ind)
+ np.savetxt(wfp, roi_box, fmt='%.3f')
+ print('Save roi box to {}'.format(wfp))
+ if args.dump_paf:
+ wfp_paf = '{}_{}_paf.jpg'.format(img_fp.replace(suffix, ''), ind)
+ wfp_crop = '{}_{}_crop.jpg'.format(img_fp.replace(suffix, ''), ind)
+ paf_feature = gen_img_paf(img_crop=img, param=param, kernel_size=args.paf_size)
+
+ cv2.imwrite(wfp_paf, paf_feature)
+ cv2.imwrite(wfp_crop, img)
+ print('Dump to {} and {}'.format(wfp_crop, wfp_paf))
+ if args.dump_obj:
+ wfp = '{}_{}.obj'.format(img_fp.replace(suffix, ''), ind)
+ colors = get_colors(img_ori, vertices)
+ write_obj_with_colors(wfp, vertices, tri, colors)
+ print('Dump obj with sampled texture to {}'.format(wfp))
+ ind += 1
+
+ if args.dump_pose:
+ # P, pose = parse_pose(param) # Camera matrix (without scale), and pose (yaw, pitch, roll, to verify)
+ img_pose = plot_pose_box(img_ori, Ps, pts_res)
+ wfp = img_fp.replace(suffix, '_pose.jpg')
+ cv2.imwrite(wfp, img_pose)
+ print('Dump to {}'.format(wfp))
+ if args.dump_depth:
+ wfp = img_fp.replace(suffix, '_depth.png')
+ # depths_img = get_depths_image(img_ori, vertices_lst, tri-1) # python version
+ depths_img = cget_depths_image(img_ori, vertices_lst, tri - 1) # cython version
+ cv2.imwrite(wfp, depths_img)
+ print('Dump to {}'.format(wfp))
+ if args.dump_pncc:
+ wfp = img_fp.replace(suffix, '_pncc.png')
+ pncc_feature = cpncc(img_ori, vertices_lst, tri - 1) # cython version
+ cv2.imwrite(wfp, pncc_feature[:, :, ::-1]) # cv2.imwrite will swap RGB -> BGR
+ print('Dump to {}'.format(wfp))
+ if args.dump_res:
+ draw_landmarks(img_ori, pts_res, wfp=img_fp.replace(suffix, '_3DDFA.jpg'), show_flg=args.show_flg)
+
+
+if __name__ == '__main__':
+    # Standalone 3DDFA entry point (adapted from https://github.com/cleardusk/3DDFA).
+    # These imports are module-level in the upstream demo; they are repeated here
+    # because cli() above only imports them locally, so main() would otherwise
+    # raise NameError when this file is run as a script.
+    import argparse
+    import numpy as np
+    import cv2
+    import dlib
+    import scipy.io as sio
+    import torch
+    import torch.backends.cudnn as cudnn
+    import torchvision.transforms as transforms
+    import mobilenet_v1
+    from utils.ddfa import ToTensorGjz, NormalizeGjz, str2bool
+    from utils.inference import get_suffix, parse_roi_box_from_landmark, crop_img, predict_68pts, dump_to_ply, dump_vertex, \
+        draw_landmarks, predict_dense, parse_roi_box_from_bbox, get_colors, write_obj_with_colors
+    from utils.cv_plot import plot_pose_box
+    from utils.estimate_pose import parse_pose
+    from utils.render import get_depths_image, cget_depths_image, cpncc
+    from utils.paf import gen_img_paf
+
+    parser = argparse.ArgumentParser(description='3DDFA inference pipeline')
+    parser.add_argument('-f', '--files', nargs='+',
+                        help='image files paths fed into network, single or multiple images')
+    parser.add_argument('-m', '--mode', default='cpu', type=str, help='gpu or cpu mode')
+    parser.add_argument('--show_flg', default='true', type=str2bool, help='whether to show the visualization result')
+    parser.add_argument('--bbox_init', default='one', type=str,
+                        help='one|two: one-step or two-step bbox initialization')
+    parser.add_argument('--dump_res', default='true', type=str2bool, help='whether to write out the visualization image')
+    parser.add_argument('--dump_vertex', default='true', type=str2bool,
+                        help='whether to write out the dense face vertices to mat')
+    parser.add_argument('--dump_ply', default='true', type=str2bool)
+    parser.add_argument('--dump_pts', default='true', type=str2bool)
+    parser.add_argument('--dump_roi_box', default='true', type=str2bool)
+    parser.add_argument('--dump_pose', default='true', type=str2bool)
+    parser.add_argument('--dump_depth', default='true', type=str2bool)
+    parser.add_argument('--dump_pncc', default='true', type=str2bool)
+    parser.add_argument('--dump_paf', default='true', type=str2bool)
+    parser.add_argument('--paf_size', default=3, type=int, help='PAF feature kernel size')
+    parser.add_argument('--dump_obj', default='true', type=str2bool)
+    parser.add_argument('--dlib_bbox', default='true', type=str2bool, help='whether to use dlib to predict bbox')
+    parser.add_argument('--dlib_landmark', default='true', type=str2bool,
+                        help='whether to use dlib landmarks to crop the image')
+
+    args = parser.parse_args()
+    main(args)
diff --git a/megapixels/commands/processor/face_attributes.py b/megapixels/commands/processor/face_attributes.py
new file mode 100644
index 00000000..01fe3bd1
--- /dev/null
+++ b/megapixels/commands/processor/face_attributes.py
@@ -0,0 +1,136 @@
+"""
+
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=cfg.DEFAULT_SIZE_FACE_DETECT,
+ help='Processing size for detection')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('-d', '--display', 'opt_display', is_flag=True,
+ help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
+ opt_size, opt_slice, opt_force, opt_display):
+  """Estimates face attributes: age (real and apparent) and gender"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors import face_age_gender
+ from app.models.data_store import DataStore
+ from app.models.bbox import BBox
+
+ # -------------------------------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+ # init face processors
+ age_estimator_apnt = face_age_gender.FaceAgeApparent()
+ age_estimator_real = face_age_gender.FaceAgeReal()
+ gender_estimator = face_age_gender.FaceGender()
+
+ # init filepaths
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # set file output path
+ metadata_type = types.Metadata.FACE_ATTRIBUTES
+ fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # -------------------------------------------------------------------------
+ # load filepath data
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
+ # load ROI data
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # slice if you want
+ if opt_slice:
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+ # group by image index (speedup if multiple faces per image)
+ df_img_groups = df_roi.groupby('record_index')
+ log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+ # store landmarks in list
+ results = []
+
+ # -------------------------------------------------------------------------
+ # iterate groups with file/record index as key
+
+ for record_index, df_img_group in tqdm(df_img_groups):
+
+    # access the file_record (a pandas Series)
+ file_record = df_record.iloc[record_index]
+
+ # load image
+ fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext)
+ im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ dim = im_resized.shape[:2][::-1]
+
+ # iterate ROIs in this image
+ for roi_index, df_img in df_img_group.iterrows():
+
+ # find landmarks
+ bbox_norm = BBox.from_xywh(df_img.x, df_img.y, df_img.w, df_img.h)
+ bbox_dim = bbox_norm.to_dim(dim)
+
+ age_apnt = age_estimator_apnt.predict(im_resized, bbox_norm)
+ age_real = age_estimator_real.predict(im_resized, bbox_norm)
+ gender = gender_estimator.predict(im_resized, bbox_norm)
+
+ attr_obj = {
+ 'age_real':float(f'{age_real:.2f}'),
+ 'age_apparent': float(f'{age_apnt:.2f}'),
+ 'm': float(f'{gender["m"]:.4f}'),
+ 'f': float(f'{gender["f"]:.4f}'),
+ 'roi_index': roi_index
+ }
+ results.append(attr_obj)
+
+
+ # create DataFrame and save to CSV
+ file_utils.mkdirs(fp_out)
+ df = pd.DataFrame.from_dict(results)
+ df.index.name = 'index'
+ df.to_csv(fp_out)
+
+ # save script
+  file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out))
\ No newline at end of file
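
The attributes CSV only carries roi_index, so its rows are joined back through face_roi.csv (roi_index) and file_record.csv (record_index) to reach filenames. A sketch of that join, with illustrative local filenames standing in for the DataStore-resolved paths:

    import pandas as pd

    df_record = pd.read_csv('file_record.csv').set_index('index')
    df_roi = pd.read_csv('face_roi.csv').set_index('index')
    df_attrs = pd.read_csv('face_attributes.csv').set_index('index')

    # attributes -> ROI (roi_index), then ROI -> source file (record_index)
    df = (df_attrs
          .join(df_roi, on='roi_index', rsuffix='_roi')
          .join(df_record, on='record_index', rsuffix='_record'))

    print(df[['fn', 'age_real', 'age_apparent', 'm', 'f']].head())
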
diff --git a/megapixels/commands/processor/face_frames.py b/megapixels/commands/processor/face_frames.py
new file mode 100644
index 00000000..76f23af1
--- /dev/null
+++ b/megapixels/commands/processor/face_frames.py
@@ -0,0 +1,82 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory to glob')
+@click.option('-o', '--output', 'opt_fp_out', required=True,
+ help='Output directory for face frames')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_size, opt_slice):
+ """Split video to face frames"""
+
+ from tqdm import tqdm
+ import dlib
+ import pandas as pd
+ from PIL import Image, ImageOps, ImageFilter
+ import cv2 as cv
+ import numpy as np
+
+ from app.processors import face_detector
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.settings import types
+ from app.utils import click_utils
+ from app.settings import app_cfg as cfg
+ from app.models.bbox import BBox
+
+ log = logger_utils.Logger.getLogger()
+
+ # -------------------------------------------------
+ # process
+
+ detector = face_detector.DetectorDLIBCNN()
+
+ # get file list
+ fp_videos = glob(join(opt_fp_in, '*.mp4'))
+ fp_videos += glob(join(opt_fp_in, '*.webm'))
+ fp_videos += glob(join(opt_fp_in, '*.mkv'))
+
+ min_distance_per = .025 # minimum distance percentage to save new face image
+ face_interval = 5
+ frame_interval_count = 0
+ frame_count = 0
+ bbox_prev = BBox(0,0,0,0)
+ file_utils.mkdirs(opt_fp_out)
+ dnn_size = opt_size
+ max_dim = max(dnn_size)
+ px_thresh = int(max_dim * min_distance_per)
+
+ for fp_video in tqdm(fp_videos):
+ # load video
+ video = cv.VideoCapture(fp_video)
+ # iterate through frames
+ while video.isOpened():
+ res, frame = video.read()
+ if not res:
+ break
+ # increment frames, save frame if interval has passed
+ frame_count += 1 # for naming
+ frame_interval_count += 1 # for interval
+ bboxes = detector.detect(frame, opt_size=dnn_size, opt_pyramids=0)
+ if len(bboxes) > 0 and frame_interval_count >= face_interval:
+ dim = frame.shape[:2][::-1]
+ d = bboxes[0].to_dim(dim).distance(bbox_prev)
+ if d > px_thresh:
+ # save frame
+ zfc = file_utils.zpad(frame_count)
+ fp_frame = join(opt_fp_out, '{}_{}.jpg'.format(Path(fp_video).stem, zfc))
+ cv.imwrite(fp_frame, frame)
+ frame_interval_count = 0
+          bbox_prev = bboxes[0].to_dim(dim)  # pixel space, to match the distance check above
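
The save condition above combines a frame interval with a minimum movement threshold. Restated as a pure function for clarity (not part of the commit):

    def should_save(frame_interval_count, dist_px, size=(300, 300),
                    face_interval=5, min_distance_per=0.025):
      # save only after `face_interval` frames AND if the face has moved more
      # than 2.5% of the largest processing dimension since the last save
      px_thresh = int(max(size) * min_distance_per)  # 7 px at 300x300
      return frame_interval_count >= face_interval and dist_px > px_thresh

    assert should_save(5, 10) is True
    assert should_save(5, 3) is False   # face barely moved: skip
    assert should_save(2, 50) is False  # interval not reached yet: skip
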
diff --git a/megapixels/commands/processor/face_landmark_2d_5.py b/megapixels/commands/processor/face_landmark_2d_5.py
new file mode 100644
index 00000000..40ec6f41
--- /dev/null
+++ b/megapixels/commands/processor/face_landmark_2d_5.py
@@ -0,0 +1,146 @@
+"""
+
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-t', '--detector', 'opt_detector_type',
+ type=cfg.FaceLandmark2D_5Var,
+ default=click_utils.get_default(types.FaceLandmark2D_5.DLIB),
+ help=click_utils.show_help(types.FaceLandmark2D_5))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('-d', '--display', 'opt_display', is_flag=True,
+ help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type,
+ opt_size, opt_slice, opt_force, opt_display):
+ """Creates 2D 5-point landmarks"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors import face_landmarks
+ from app.models.data_store import DataStore
+ from app.models.bbox import BBox
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+ # init filepaths
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # set file output path
+ metadata_type = types.Metadata.FACE_LANDMARK_2D_5
+ fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # init face landmark processors
+ if opt_detector_type == types.FaceLandmark2D_5.DLIB:
+    # use dlib 5-point detector
+ landmark_detector = face_landmarks.Dlib2D_5()
+ elif opt_detector_type == types.FaceLandmark2D_5.MTCNN:
+    # use MTCNN 5-point detector
+ landmark_detector = face_landmarks.MTCNN2D_5()
+ else:
+ log.error('{} not yet implemented'.format(opt_detector_type.name))
+ return
+
+ log.info(f'Using landmark detector: {opt_detector_type.name}')
+
+ # load filepath data
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+ # load ROI data
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # slice if you want
+ if opt_slice:
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+ # group by image index (speedup if multiple faces per image)
+ df_img_groups = df_roi.groupby('record_index')
+ log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+ # store landmarks in list
+ results = []
+
+ # iterate groups with file/record index as key
+ for record_index, df_img_group in tqdm(df_img_groups):
+
+    # access file record
+ ds_record = df_record.iloc[record_index]
+
+ # load image
+ fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+
+ # iterate image group dataframe with roi index as key
+ for roi_index, df_img in df_img_group.iterrows():
+
+ # get bbox
+ x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
+ dim = im_resized.shape[:2][::-1]
+ bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+
+ # get landmark points
+ points = landmark_detector.landmarks(im_resized, bbox)
+ points_norm = landmark_detector.normalize(points, dim)
+ points_flat = landmark_detector.flatten(points_norm)
+
+ # display to screen if optioned
+      if opt_display:
+        dst = im_resized.copy()  # draw on a copy so later ROIs see a clean image
+        draw_utils.draw_landmarks2D(dst, points)
+        draw_utils.draw_bbox(dst, bbox)
+        cv.imshow('', dst)
+        display_utils.handle_keyboard()
+
+ results.append(points_flat)
+
+ # create DataFrame and save to CSV
+ file_utils.mkdirs(fp_out)
+ df = pd.DataFrame.from_dict(results)
+ df.index.name = 'index'
+ df.to_csv(fp_out)
+
+ # save script
+  file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out))
\ No newline at end of file
diff --git a/megapixels/commands/processor/face_landmark_2d_68.py b/megapixels/commands/processor/face_landmark_2d_68.py
new file mode 100644
index 00000000..c6978a40
--- /dev/null
+++ b/megapixels/commands/processor/face_landmark_2d_68.py
@@ -0,0 +1,150 @@
+"""
+
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-t', '--detector', 'opt_detector_type',
+ type=cfg.FaceLandmark2D_68Var,
+ default=click_utils.get_default(types.FaceLandmark2D_68.DLIB),
+ help=click_utils.show_help(types.FaceLandmark2D_68))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('-d', '--display', 'opt_display', is_flag=True,
+ help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type,
+ opt_size, opt_slice, opt_force, opt_display):
+ """Creates 2D 68-point landmarks"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors import face_landmarks
+ from app.models.data_store import DataStore
+ from app.models.bbox import BBox
+
+ # -------------------------------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+ # init filepaths
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # set file output path
+ metadata_type = types.Metadata.FACE_LANDMARK_2D_68
+ fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # init face landmark processors
+ if opt_detector_type == types.FaceLandmark2D_68.DLIB:
+ # use dlib 68 point detector
+ landmark_detector = face_landmarks.Dlib2D_68()
+ elif opt_detector_type == types.FaceLandmark2D_68.FACE_ALIGNMENT:
+    # use face_alignment 68-point detector
+ landmark_detector = face_landmarks.FaceAlignment2D_68()
+ else:
+ log.error('{} not yet implemented'.format(opt_detector_type.name))
+ return
+
+ log.info(f'Using landmark detector: {opt_detector_type.name}')
+
+ # -------------------------------------------------------------------------
+ # load filepath data
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+ # load ROI data
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # slice if you want
+ if opt_slice:
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+ # group by image index (speedup if multiple faces per image)
+ df_img_groups = df_roi.groupby('record_index')
+ log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+ # store landmarks in list
+ results = []
+
+ # -------------------------------------------------------------------------
+ # iterate groups with file/record index as key
+
+ for record_index, df_img_group in tqdm(df_img_groups):
+
+ # access file_record DataSeries
+ file_record = df_record.iloc[record_index]
+
+ # load image
+ fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext)
+ im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ dim = im_resized.shape[:2][::-1]
+
+ # iterate ROIs in this image
+ for roi_index, df_img in df_img_group.iterrows():
+
+ # find landmarks
+ x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h # normalized values
+ #dim = (file_record.width, file_record.height) # original w,h
+ bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+ points = landmark_detector.landmarks(im_resized, bbox)
+ points_norm = landmark_detector.normalize(points, dim)
+ points_str = landmark_detector.to_str(points_norm)
+
+ # display if optioned
+ if opt_display:
+ dst = im_resized.copy()
+ draw_utils.draw_landmarks2D(dst, points)
+ draw_utils.draw_bbox(dst, bbox)
+ cv.imshow('', dst)
+ display_utils.handle_keyboard()
+
+ # add to results for CSV
+ results.append({'vec': points_str, 'roi_index':roi_index})
+
+
+ # create DataFrame and save to CSV
+ file_utils.mkdirs(fp_out)
+ df = pd.DataFrame.from_dict(results)
+ df.index.name = 'index'
+ df.to_csv(fp_out)
+
+ # save script
+  file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out))
\ No newline at end of file
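
The normalize/to_str helpers serialize one ROI's landmarks into a single CSV field. Since they aren't part of this diff, here is one plausible shape of that serialization (separator and precision are assumptions):

    import numpy as np

    def to_str(points_norm, precision=6):
      # one ROI per CSV row: 'x1,y1 x2,y2 ...'
      return ' '.join('{:.{p}f},{:.{p}f}'.format(x, y, p=precision)
                      for x, y in points_norm)

    points = np.array([[120, 80], [180, 82], [150, 130]])  # (N, 2) pixel coords
    dim = (300, 300)                                       # resized image w, h
    points_norm = points / np.array(dim, dtype=float)
    vec = to_str(points_norm)
    # -> '0.400000,0.266667 0.600000,0.273333 0.500000,0.433333'
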
diff --git a/megapixels/commands/processor/face_landmark_3d_68.py b/megapixels/commands/processor/face_landmark_3d_68.py
new file mode 100644
index 00000000..a2d14d72
--- /dev/null
+++ b/megapixels/commands/processor/face_landmark_3d_68.py
@@ -0,0 +1,147 @@
+"""
+
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-t', '--detector', 'opt_detector_type',
+ type=cfg.FaceLandmark3D_68Var,
+ default=click_utils.get_default(types.FaceLandmark3D_68.FACE_ALIGNMENT),
+ help=click_utils.show_help(types.FaceLandmark3D_68))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('-d', '--display', 'opt_display', is_flag=True,
+ help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_detector_type,
+ opt_size, opt_slice, opt_force, opt_display):
+ """Generate 3D 68-point landmarks"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.utils import plot_utils
+ from app.processors import face_landmarks
+ from app.models.data_store import DataStore
+ from app.models.bbox import BBox
+
+ # --------------------------------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+ log.warn('not normalizing points')
+ # init filepaths
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # set file output path
+ metadata_type = types.Metadata.FACE_LANDMARK_3D_68
+ fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # init face landmark processors
+ if opt_detector_type == types.FaceLandmark3D_68.FACE_ALIGNMENT:
+ # use FaceAlignment 68 point 3D detector
+ landmark_detector = face_landmarks.FaceAlignment3D_68()
+ else:
+ log.error('{} not yet implemented'.format(opt_detector_type.name))
+ return
+
+ log.info(f'Using landmark detector: {opt_detector_type.name}')
+
+ # -------------------------------------------------------------------------
+ # load data
+
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD) # file_record.csv
+ df_record = pd.read_csv(fp_record).set_index('index')
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI) # face_roi.csv
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ if opt_slice:
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]] # slice if you want
+ df_img_groups = df_roi.groupby('record_index') # groups by image index (load once)
+ log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+ # store landmarks in list
+ results = []
+
+ # iterate groups with file/record index as key
+ for record_index, df_img_group in tqdm(df_img_groups):
+
+    # access file record
+ ds_record = df_record.iloc[record_index]
+
+ # load image
+ fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+
+ # iterate image group dataframe with roi index as key
+ for roi_index, df_img in df_img_group.iterrows():
+
+ # get bbox
+ x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
+ dim = im_resized.shape[:2][::-1]
+ bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+
+ # get landmark points
+ points = landmark_detector.landmarks(im_resized, bbox)
+      # NB: these points can't be normalized against the image dims;
+      # they are already scaled to the detector's 3D space
+      #points_norm = landmark_detector.normalize(points, dim) # normalized using 200
+      points_flattened = landmark_detector.flatten(points)
+
+ # display to screen if optioned
+      if opt_display:
+        dst = im_resized.copy()  # draw on a copy so later ROIs see a clean image
+        draw_utils.draw_landmarks3D(dst, points)
+        draw_utils.draw_bbox(dst, bbox)
+        cv.imshow('', dst)
+        display_utils.handle_keyboard()
+
+ #plot_utils.generate_3d_landmark_anim(points, '/home/adam/Downloads/3d.gif')
+
+      results.append(points_flattened)
+
+ # create DataFrame and save to CSV
+ file_utils.mkdirs(fp_out)
+ df = pd.DataFrame.from_dict(results)
+ df.index.name = 'index'
+ df.to_csv(fp_out)
+
+ # save script
+  file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out))
\ No newline at end of file
diff --git a/megapixels/commands/processor/face_pose.py b/megapixels/commands/processor/face_pose.py
new file mode 100644
index 00000000..cb7ec56c
--- /dev/null
+++ b/megapixels/commands/processor/face_pose.py
@@ -0,0 +1,164 @@
+"""
+NB: This only works with the DLIB 68-point landmarks.
+
+Converts ROIs to pose: yaw, roll, pitch
+pitch: looking down or up in yes gesture
+roll: tilting head towards shoulder
+yaw: twisting head left to right in no gesture
+
+"""
+
+"""
+TODO
+- check compatibility with MTCNN 68 point detector
+- improve accuracy by using MTCNN 5-point
+- refer to https://github.com/jerryhouuu/Face-Yaw-Roll-Pitch-from-Pose-Estimation-using-OpenCV/
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('-d', '--display', 'opt_display', is_flag=True,
+ help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
+ opt_slice, opt_force, opt_display):
+ """Converts ROIs to pose: roll, yaw, pitch"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import dlib # must keep a local reference for dlib
+ import cv2 as cv
+ import pandas as pd
+
+ from app.models.bbox import BBox
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors.face_landmarks import Dlib2D_68
+ from app.processors.face_pose import FacePoseDLIB
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FACE_POSE) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # init face processors
+ face_pose = FacePoseDLIB()
+ face_landmarks = Dlib2D_68()
+
+ # -------------------------------------------------
+ # load data
+
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
+ # load ROI data
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # slice if you want
+ if opt_slice:
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+ # group by image index (speedup if multiple faces per image)
+ df_img_groups = df_roi.groupby('record_index')
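+  # e.g. two ROI rows sharing record_index 7 form one group, so image 7 is
+  # read and resized only once for both faces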
+ log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+ # store poses and convert to DataFrame
+ results = []
+
+ # -------------------------------------------------
+ # iterate groups with file/record index as key
+ for record_index, df_img_group in tqdm(df_img_groups):
+
+ # access the file_record
+    file_record = df_record.iloc[record_index] # pandas.Series
+
+ # load image
+ fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext)
+ im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+
+ # iterate image group dataframe with roi index as key
+ for roi_index, df_img in df_img_group.iterrows():
+
+ # get bbox
+ x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
+ #dim = (file_record.width, file_record.height)
+ dim = im_resized.shape[:2][::-1]
+ bbox_norm = BBox.from_xywh(x, y, w, h)
+ bbox_dim = bbox_norm.to_dim(dim)
+
+ # get pose
+      landmarks = face_landmarks.landmarks(im_resized, bbox_dim)  # pixel-space bbox, as in the other landmark commands
+ pose_data = face_pose.pose(landmarks, dim)
+ #pose_degrees = pose_data['degrees'] # only keep the degrees data
+ #pose_degrees['points_nose'] = pose_data
+
+ # draw landmarks if optioned
+ if opt_display:
+ draw_utils.draw_pose(im_resized, pose_data['point_nose'], pose_data['points'])
+ draw_utils.draw_degrees(im_resized, pose_data)
+ cv.imshow('', im_resized)
+ display_utils.handle_keyboard()
+
+      # add roi index and flatten point data for the CSV row
+ pose_data['roi_index'] = roi_index
+ for k, v in pose_data['points'].items():
+ pose_data[f'point_{k}_x'] = v[0] / dim[0]
+ pose_data[f'point_{k}_y'] = v[1] / dim[1]
+
+ # rearrange data structure for DataFrame
+ pose_data.pop('points')
+ pose_data['point_nose_x'] = pose_data['point_nose'][0] / dim[0]
+ pose_data['point_nose_y'] = pose_data['point_nose'][1] / dim[1]
+ pose_data.pop('point_nose')
+ results.append(pose_data)
+
+ # create DataFrame and save to CSV
+ file_utils.mkdirs(fp_out)
+ df = pd.DataFrame.from_dict(results)
+ df.index.name = 'index'
+ df.to_csv(fp_out)
+
+ # save script
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/processor/face_roi.py b/megapixels/commands/processor/face_roi.py
new file mode 100644
index 00000000..fc933049
--- /dev/null
+++ b/megapixels/commands/processor/face_roi.py
@@ -0,0 +1,187 @@
+"""
+Detect faces in images and export ROIs to CSV
+"""
+
+import click
+# from PIL import Image, ImageOps, ImageFilter, ImageDraw
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+color_filters = {'color': 1, 'gray': 2, 'all': 3}
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(480, 480),
+ help='Output image size')
+@click.option('-d', '--detector', 'opt_detector_type',
+ type=cfg.FaceDetectNetVar,
+ default=click_utils.get_default(types.FaceDetectNet.CVDNN),
+ help=click_utils.show_help(types.FaceDetectNet))
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+ help='GPU index')
+@click.option('--conf', 'opt_conf_thresh', default=0.85, type=click.FloatRange(0,1),
+ help='Confidence minimum threshold')
+@click.option('-p', '--pyramids', 'opt_pyramids', default=0, type=click.IntRange(0,4),
+ help='Number pyramids to upscale for DLIB detectors')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+ help='Display detections to debug')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('--color', 'opt_color_filter',
+ type=click.Choice(color_filters.keys()), default='color',
+  help='Filter to keep color or grayscale images (color = keep color)')
+@click.option('--keep', 'opt_largest', type=click.Choice(['largest', 'all']), default='largest',
+ help='Only keep largest face')
+@click.option('--zone', 'opt_zone', default=(0.0, 0.0), type=(float, float),
+ help='Face center must be located within zone region (0.5 = half width/height)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset, opt_size, opt_detector_type,
+ opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_force, opt_color_filter,
+ opt_largest, opt_zone):
+ """Converts frames with faces to CSV of ROIs"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import dlib # must keep a local reference for dlib
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors import face_detector
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FACE_ROI) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # set detector
+ if opt_detector_type == types.FaceDetectNet.CVDNN:
+ detector = face_detector.DetectorCVDNN()
+ elif opt_detector_type == types.FaceDetectNet.DLIB_CNN:
+ detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu)
+ elif opt_detector_type == types.FaceDetectNet.DLIB_HOG:
+ detector = face_detector.DetectorDLIBHOG()
+ elif opt_detector_type == types.FaceDetectNet.MTCNN_TF:
+ detector = face_detector.DetectorMTCNN_TF(gpu=opt_gpu)
+ elif opt_detector_type == types.FaceDetectNet.HAAR:
+ log.error('{} not yet implemented'.format(opt_detector_type.name))
+ return
+
+
+ # get list of files to process
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
+ if opt_slice:
+ df_record = df_record[opt_slice[0]:opt_slice[1]]
+ log.debug('processing {:,} files'.format(len(df_record)))
+
+ # filter out grayscale
+ color_filter = color_filters[opt_color_filter]
+ # set largest flag, to keep all or only largest
+ opt_largest = (opt_largest == 'largest')
+
+ data = []
+ skipped_files = []
+ processed_files = []
+
+  for ds_record in tqdm(df_record.itertuples(), total=len(df_record)):
+    fp_im = data_store.face(str(ds_record.subdir), str(ds_record.fn), str(ds_record.ext))
+    try:
+      im = cv.imread(fp_im)
+      im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+    except Exception as e:
+      log.debug(f'could not read: {fp_im}')
+      continue  # skip unreadable files instead of aborting the run
+    # filter out color or grayscale images
+    if color_filter != color_filters['all']:
+      try:
+        is_gray = im_utils.is_grayscale(im)
+        if is_gray and color_filter == color_filters['color']:
+          log.debug('Skipping grayscale image: {}'.format(fp_im))
+          continue
+        if not is_gray and color_filter == color_filters['gray']:
+          log.debug('Skipping color image: {}'.format(fp_im))
+          continue
+      except Exception as e:
+        log.error('Could not check grayscale: {}'.format(fp_im))
+        continue
+
+ try:
+ bboxes_norm = detector.detect(im_resized, pyramids=opt_pyramids, largest=opt_largest,
+ zone=opt_zone, conf_thresh=opt_conf_thresh)
+ except Exception as e:
+ log.error('could not detect: {}'.format(fp_im))
+ log.error('{}'.format(e))
+ continue
+
+ if len(bboxes_norm) == 0:
+ skipped_files.append(fp_im)
+ log.warn(f'no faces in: {fp_im}')
+      log.warn(f'skipped: {len(skipped_files)}, found: {len(processed_files)} files')
+ else:
+ processed_files.append(fp_im)
+ for bbox in bboxes_norm:
+ roi = {
+          'record_index': int(ds_record.Index),
+ 'x': bbox.x,
+ 'y': bbox.y,
+ 'w': bbox.w,
+ 'h': bbox.h
+ }
+ data.append(roi)
+
+    # if display optioned
+    if opt_display and len(bboxes_norm):
+      # downscale large images once before drawing
+      dim = im_resized.shape[:2][::-1]
+      if dim[0] > 1000:
+        im_resized = im_utils.resize(im_resized, width=1000)
+      # draw each box
+      for bbox_norm in bboxes_norm:
+        im_resized = draw_utils.draw_bbox(im_resized, bbox_norm)
+
+ # display and wait
+ cv.imshow('', im_resized)
+ display_utils.handle_keyboard()
+
+ # create DataFrame and save to CSV
+ file_utils.mkdirs(fp_out)
+ df = pd.DataFrame.from_dict(data)
+ df.index.name = 'index'
+ df.to_csv(fp_out)
+
+ # save script
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/processor/face_vector.py b/megapixels/commands/processor/face_vector.py
new file mode 100644
index 00000000..cb155d08
--- /dev/null
+++ b/megapixels/commands/processor/face_vector.py
@@ -0,0 +1,133 @@
+"""
+Converts ROIs to face vector
+NB: the VGG Face2 extractor should be used with MTCNN ROIs (not square)
+ the DLIB face extractor should be used with DLIB ROIs (square)
+see https://github.com/ox-vgg/vgg_face2 for TAR@FAR
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=cfg.DEFAULT_SIZE_FACE_DETECT,
+ help='Output image size')
+@click.option('-e', '--extractor', 'opt_extractor',
+ default=click_utils.get_default(types.FaceExtractor.VGG),
+ type=cfg.FaceExtractorVar,
+ help='Type of extractor framework/network to use')
+@click.option('-j', '--jitters', 'opt_jitters', default=cfg.DLIB_FACEREC_JITTERS,
+  help='Number of jitters (only for dlib)')
+@click.option('-p', '--padding', 'opt_padding', default=cfg.FACEREC_PADDING,
+ help='Percentage ROI padding')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+ help='GPU index')
+@click.pass_context
+def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
+ opt_extractor, opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding):
+ """Converts face ROIs to vectors"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import dlib # must keep a local reference for dlib
+ import cv2 as cv
+ import pandas as pd
+
+ from app.models.bbox import BBox
+ from app.models.data_store import DataStore
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.processors import face_extractor
+
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FACE_VECTOR) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # init face processors
+ if opt_extractor == types.FaceExtractor.DLIB:
+ log.debug('set dlib')
+ extractor = face_extractor.ExtractorDLIB(gpu=opt_gpu, jitters=opt_jitters)
+ elif opt_extractor == types.FaceExtractor.VGG:
+ extractor = face_extractor.ExtractorVGG()
+
+ # load data
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+
+ if opt_slice:
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+
+ # -------------------------------------------------
+ # process images
+
+ df_img_groups = df_roi.groupby('record_index')
+ log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+ vecs = []
+ for record_index, df_img_group in tqdm(df_img_groups):
+ # make fp
+ ds_record = df_record.iloc[record_index]
+ fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ im = cv.imread(fp_im)
+ im = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ for roi_index, df_img in df_img_group.iterrows():
+ # get bbox
+ x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
+ # get face vector
+ bbox = BBox.from_xywh(x, y, w, h) # norm
+ # compute vec
+ vec = extractor.extract(im, bbox) # use normalized BBox
+ vec_str = extractor.to_str(vec)
+ vec_obj = {'vec':vec_str, 'roi_index': roi_index, 'record_index':record_index}
+ vecs.append(vec_obj)
+
+ # -------------------------------------------------
+ # save data
+
+ # create DataFrame and save to CSV
+ df = pd.DataFrame.from_dict(vecs)
+ df.index.name = 'index'
+ file_utils.mkdirs(fp_out)
+ df.to_csv(fp_out)
+
+ # save script
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/processor/mirror.py b/megapixels/commands/processor/mirror.py
new file mode 100644
index 00000000..9ca1cac7
--- /dev/null
+++ b/megapixels/commands/processor/mirror.py
@@ -0,0 +1,57 @@
+"""
+Mirror images to augment a training set
+"""
+
+import click
+import cv2 as cv
+from PIL import Image, ImageOps, ImageFilter
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_dir_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+ help='Output directory')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice the input list')
+@click.pass_context
+def cli(ctx, opt_dir_in, opt_dir_out, opt_slice):
+ """Mirror augment image directory"""
+
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+ from tqdm import tqdm
+
+ from app.utils import logger_utils, file_utils, im_utils
+
+ # -------------------------------------------------
+ # init
+
+ log = logger_utils.Logger.getLogger()
+
+ # -------------------------------------------------
+ # process here
+
+ # get list of files to process
+ fp_ims = glob(join(opt_dir_in, '*.jpg'))
+ fp_ims += glob(join(opt_dir_in, '*.png'))
+
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ log.info('processing {:,} files'.format(len(fp_ims)))
+
+ # ensure output dir exists
+ file_utils.mkdirs(opt_dir_out)
+
+  # mirror and save images
+  for fp_im in tqdm(fp_ims):
+    im = ImageOps.mirror(Image.open(fp_im))  # horizontal flip
+    fpp_im = Path(fp_im)
+    fp_out = join(opt_dir_out, '{}_mirror{}'.format(fpp_im.stem, fpp_im.suffix))
+    im.save(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/processor/resize.py b/megapixels/commands/processor/resize.py
new file mode 100644
index 00000000..7409ee6f
--- /dev/null
+++ b/megapixels/commands/processor/resize.py
@@ -0,0 +1,150 @@
+"""
+Resize images to prepare for training
+"""
+
+import click
+import cv2 as cv
+from PIL import Image, ImageOps, ImageFilter
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+"""
+Filter Q-Down Q-Up Speed
+NEAREST ⭐⭐⭐⭐⭐
+BOX ⭐ ⭐⭐⭐⭐
+BILINEAR ⭐ ⭐ ⭐⭐⭐
+HAMMING ⭐⭐ ⭐⭐⭐
+BICUBIC ⭐⭐⭐ ⭐⭐⭐ ⭐⭐
+LANCZOS ⭐⭐⭐⭐ ⭐⭐⭐⭐ ⭐
+"""
+methods = {
+ 'lanczos': Image.LANCZOS,
+ 'bicubic': Image.BICUBIC,
+ 'hamming': Image.HAMMING,
+  'bilinear': Image.BILINEAR,
+ 'box': Image.BOX,
+ 'nearest': Image.NEAREST
+ }
+centerings = {
+ 'tl': (0.0, 0.0),
+ 'tc': (0.5, 0.0),
+  'tr': (1.0, 0.0),
+ 'lc': (0.0, 0.5),
+ 'cc': (0.5, 0.5),
+ 'rc': (1.0, 0.5),
+ 'bl': (0.0, 1.0),
+  'bc': (0.5, 1.0),
+ 'br': (1.0, 1.0)
+}
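+# e.g. ImageOps.fit(im, (256, 256), centering=centerings['cc']) crops around
+# the image center, while 'tl' anchors the crop at the top-left corner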
+
+@click.command()
+@click.option('-i', '--input', 'opt_dir_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+ help='Output directory')
+@click.option('-e', '--ext', 'opt_glob_ext',
+ default='png', type=click.Choice(['jpg', 'png']),
+ help='File glob ext')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(256, 256),
+ help='Max output size')
+@click.option('--method', 'opt_scale_method',
+ type=click.Choice(methods.keys()),
+ default='lanczos',
+ help='Scaling method to use')
+@click.option('--equalize', 'opt_equalize', is_flag=True,
+  help='Equalize histogram')
+@click.option('--sharpen', 'opt_sharpen', is_flag=True,
+ help='Unsharp mask')
+@click.option('--center', 'opt_center', default='cc', type=click.Choice(centerings.keys()),
+ help='Crop focal point')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice the input list')
+@click.option('-t', '--threads', 'opt_threads', default=8,
+ help='Number of threads')
+@click.pass_context
+def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method,
+ opt_equalize, opt_sharpen, opt_center, opt_slice, opt_threads):
+ """Crop, mirror images"""
+
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+ from tqdm import tqdm
+ from multiprocessing.dummy import Pool as ThreadPool
+ from functools import partial
+
+ from app.utils import logger_utils, file_utils, im_utils
+
+ # -------------------------------------------------
+ # init
+
+ log = logger_utils.Logger.getLogger()
+
+
+ # -------------------------------------------------
+ # process here
+
+  def pool_resize(fp_im, opt_size, scale_method, centering):
+    # Threaded image resize function
+    try:
+      pbar.update(1)
+      try:
+        im = Image.open(fp_im)
+        im.verify()  # raises if the file is corrupt; image must be reopened after verify
+        im = Image.open(fp_im).convert('RGB')
+      except Exception as e:
+        log.warn('Could not open: {}'.format(fp_im))
+        log.error(e)
+        return False
+
+      # crop/scale to the target size around the chosen focal point
+      im = ImageOps.fit(im, opt_size, method=scale_method, centering=centering)
+
+      if opt_equalize:
+        # blend 35% equalized with 65% original to soften the effect
+        im_np = im_utils.pil2np(im)
+        im_np_eq = eq_hist_yuv(im_np)
+        im_np = cv.addWeighted(im_np_eq, 0.35, im_np, 0.65, 0)
+        im = im_utils.np2pil(im_np)
+
+      if opt_sharpen:
+        im = im.filter(ImageFilter.UnsharpMask)
+
+      fp_out = join(opt_dir_out, Path(fp_im).name)
+      im.save(fp_out)
+      return True
+    except Exception as e:
+      log.error('Could not resize: {}, Error: {}'.format(fp_im, e))
+      return False
+
+  centering = centerings[opt_center]
+  scale_method = methods[opt_scale_method]
+
+ # get list of files to process
+ fp_ims = glob(join(opt_dir_in, '*.{}'.format(opt_glob_ext)))
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ log.info('processing {:,} files'.format(len(fp_ims)))
+
+
+ # ensure output dir exists
+ file_utils.mkdirs(opt_dir_out)
+
+ # setup multithreading
+  # fixed arguments for the pool function
+  map_pool_resize = partial(pool_resize, opt_size=opt_size, scale_method=scale_method, centering=centering)
+  pool = ThreadPool(opt_threads)
+  with tqdm(total=len(fp_ims)) as pbar:
+    results = pool.map(map_pool_resize, fp_ims)
+
+ log.info('Resized: {} / {} images'.format(results.count(True), len(fp_ims)))
+
+
+
+def eq_hist_yuv(im):
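+  # equalize only the luma (Y) channel; equalizing B,G,R separately would
+  # shift colors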
+ im_yuv = cv.cvtColor(im, cv.COLOR_BGR2YUV)
+ im_yuv[:,:,0] = cv.equalizeHist(im_yuv[:,:,0])
+ return cv.cvtColor(im_yuv, cv.COLOR_YUV2BGR)
diff --git a/megapixels/commands/processor/resize_dataset.py b/megapixels/commands/processor/resize_dataset.py
new file mode 100644
index 00000000..3a6ec15f
--- /dev/null
+++ b/megapixels/commands/processor/resize_dataset.py
@@ -0,0 +1,149 @@
+"""
+Resize dataset images using the file-record metadata
+"""
+
+import click
+import cv2 as cv
+from PIL import Image, ImageOps, ImageFilter
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+cv_resize_algos = {
+ 'area': cv.INTER_AREA,
+  'lanczos': cv.INTER_LANCZOS4,
+ 'linear': cv.INTER_LINEAR,
+ 'linear_exact': cv.INTER_LINEAR_EXACT,
+ 'nearest': cv.INTER_NEAREST
+}
+"""
+Filter Q-Down Q-Up Speed
+NEAREST ⭐⭐⭐⭐⭐
+BOX ⭐ ⭐⭐⭐⭐
+BILINEAR ⭐ ⭐ ⭐⭐⭐
+HAMMING ⭐⭐ ⭐⭐⭐
+BICUBIC ⭐⭐⭐ ⭐⭐⭐ ⭐⭐
+LANCZOS ⭐⭐⭐⭐ ⭐⭐⭐⭐ ⭐
+"""
+pil_resize_algos = {
+ 'antialias': Image.ANTIALIAS,
+ 'lanczos': Image.LANCZOS,
+ 'bicubic': Image.BICUBIC,
+ 'hamming': Image.HAMMING,
+  'bilinear': Image.BILINEAR,
+ 'box': Image.BOX,
+ 'nearest': Image.NEAREST
+ }
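+# NB: since Pillow 2.7, Image.ANTIALIAS is an alias of Image.LANCZOS, so the
+# 'antialias' and 'lanczos' keys select the same filter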
+
+@click.command()
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+ help='Output directory')
+@click.option('-e', '--ext', 'opt_glob_ext',
+ default='png', type=click.Choice(['jpg', 'png']),
+ help='File glob ext')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(256, 256),
+ help='Output image size max (w,h)')
+@click.option('--interp', 'opt_interp_algo',
+ type=click.Choice(pil_resize_algos.keys()),
+ default='bicubic',
+ help='Interpolation resizing algorithms')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice the input list')
+@click.option('-t', '--threads', 'opt_threads', default=8,
+ help='Number of threads')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+ help='Use glob recursion (slower)')
+@click.pass_context
+def cli(ctx, opt_dataset, opt_data_store, opt_dir_out, opt_glob_ext, opt_size, opt_interp_algo,
+ opt_slice, opt_threads, opt_recursive):
+ """Resize dataset images"""
+
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+ from tqdm import tqdm
+ from multiprocessing.dummy import Pool as ThreadPool
+ from functools import partial
+ import pandas as pd
+ import numpy as np
+
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init
+
+ log = logger_utils.Logger.getLogger()
+
+
+ # -------------------------------------------------
+ # process here
+
+  def pool_resize(fp_in, dir_in, dir_out, im_size, interp_algo):
+    # Threaded image resize function
+    pbar.update(1)
+    try:
+      im = Image.open(fp_in)
+      im.verify()  # throws error if image is corrupt; must reopen after verify
+      im = Image.open(fp_in).convert('RGB')
+      im.thumbnail(im_size, interp_algo)  # in-place, keeps aspect ratio, never upscales
+      fp_out = fp_in.replace(dir_in, dir_out)
+      file_utils.mkdirs(fp_out)
+      im.save(fp_out, quality=100)
+    except Exception as e:
+      log.warn(f'Could not open: {fp_in}, Error: {e}')
+      return False
+    return True
+
+
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_records = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_records = pd.read_csv(fp_records, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
+ dir_in = data_store.media_images_original()
+
+ # get list of files to process
+ #fp_ims = file_utils.glob_multi(opt_dir_in, ['jpg', 'png'], recursive=opt_recursive)
+ fp_ims = []
+ for ds_record in df_records.itertuples():
+ fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ fp_ims.append(fp_im)
+
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ if not fp_ims:
+    log.error('No images to process. Check the file records CSV and --slice values')
+ return
+ log.info(f'processing {len(fp_ims):,} images')
+
+ # algorithm to use for resizing
+ interp_algo = pil_resize_algos[opt_interp_algo]
+  log.info(f'using {opt_interp_algo} for interpolation')
+
+ # ensure output dir exists
+ file_utils.mkdirs(opt_dir_out)
+
+ # setup multithreading
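+  # a thread pool (not a process pool) fits this workload: it is dominated by
+  # disk I/O and image codec work, during which PIL releases the GIL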
+  # fixed arguments for pool function
+  map_pool_resize = partial(pool_resize, dir_in=dir_in, dir_out=opt_dir_out, im_size=opt_size, interp_algo=interp_algo)
+  pool = ThreadPool(opt_threads)
+  # start multithreading; the with-block owns the progress bar updated in pool_resize
+  with tqdm(total=len(fp_ims)) as pbar:
+    results = pool.map(map_pool_resize, fp_ims)
+
+ log.info(f'Resized: {results.count(True)} / {len(fp_ims)} images') \ No newline at end of file
diff --git a/megapixels/commands/processor/videos_to_frames.py b/megapixels/commands/processor/videos_to_frames.py
new file mode 100644
index 00000000..0b56c46a
--- /dev/null
+++ b/megapixels/commands/processor/videos_to_frames.py
@@ -0,0 +1,73 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+
+import dlib # must keep a local reference for dlib
+from app.utils import file_utils, im_utils
+
+
+log = logger_utils.Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out', required=True,
+ help='Output directory')
+@click.option('--size', 'opt_size', default=(320, 240),
+ help='Inference size for face detection' )
+@click.option('--interval', 'opt_frame_interval', default=20,
+ help='Number of frames before saving next face')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_size, opt_frame_interval):
+ """Converts videos to frames with faces"""
+
+ # -------------------------------------------------
+ # process
+
+  from tqdm import tqdm
+  import cv2 as cv
+ from app.processors import face_detector
+
+ detector = face_detector.DetectorDLIBCNN()
+
+ # get file list
+ fp_videos = glob(join(opt_fp_in, '*.mp4'))
+ fp_videos += glob(join(opt_fp_in, '*.webm'))
+ fp_videos += glob(join(opt_fp_in, '*.mkv'))
+
+ frame_interval_count = 0
+ frame_count = 0
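+  # e.g. --interval 20 on a 30 fps clip saves at most one face frame every
+  # 20 frames (~1.5 saves/second) while faces are detected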
+
+ file_utils.mkdirs(opt_fp_out)
+
+ for fp_video in tqdm(fp_videos):
+
+ video = cv.VideoCapture(fp_video)
+
+ while video.isOpened():
+ res, frame = video.read()
+ if not res:
+ break
+
+ frame_count += 1 # for naming
+ frame_interval_count += 1 # for interval
+
+ bboxes = detector.detect(frame, opt_size=opt_size, opt_pyramids=0)
+ if len(bboxes) > 0 and frame_interval_count >= opt_frame_interval:
+ # save frame
+ fname = file_utils.zpad(frame_count)
+ fp_frame = join(opt_fp_out, '{}_{}.jpg'.format(Path(fp_video).stem, fname))
+ cv.imwrite(fp_frame, frame)
+ frame_interval_count = 0
+
+    video.release()