Diffstat (limited to 'megapixels/commands')
-rw-r--r--  megapixels/commands/admin/rsync.py                        106
-rw-r--r--  megapixels/commands/cv/_old_files_to_face_rois.py         168
-rw-r--r--  megapixels/commands/cv/cluster.py                          47
-rw-r--r--  megapixels/commands/cv/crop.py                            104
-rw-r--r--  megapixels/commands/cv/csv_to_faces.py                    105
-rw-r--r--  megapixels/commands/cv/csv_to_faces_mt.py                 105
-rw-r--r--  megapixels/commands/cv/face_frames.py                      82
-rw-r--r--  megapixels/commands/cv/face_landmarks_3d.py                96
-rw-r--r--  megapixels/commands/cv/face_pose.py                       140
-rw-r--r--  megapixels/commands/cv/face_roi.py                        171
-rw-r--r--  megapixels/commands/cv/face_vector.py                     125
-rw-r--r--  megapixels/commands/cv/mirror.py                           57
-rw-r--r--  megapixels/commands/cv/resize.py                          149
-rw-r--r--  megapixels/commands/cv/videos_to_frames.py                 73
-rw-r--r--  megapixels/commands/datasets/50people.py                  129
-rw-r--r--  megapixels/commands/datasets/feret.py                     139
-rw-r--r--  megapixels/commands/datasets/filter_by_pose.py             96
-rw-r--r--  megapixels/commands/datasets/gen_filepath.py              102
-rw-r--r--  megapixels/commands/datasets/gen_uuid.py                   65
-rw-r--r--  megapixels/commands/datasets/identity_meta_lfw.py          93
-rw-r--r--  megapixels/commands/datasets/identity_meta_vgg_face2.py    88
-rw-r--r--  megapixels/commands/datasets/lookup.py                     63
-rw-r--r--  megapixels/commands/datasets/megaface_flickr_api.py       141
-rw-r--r--  megapixels/commands/datasets/megaface_names.py             65
-rw-r--r--  megapixels/commands/datasets/records.py                   167
-rw-r--r--  megapixels/commands/datasets/s3_sync.py                    61
-rw-r--r--  megapixels/commands/datasets/symlink_uuid.py               57
-rw-r--r--  megapixels/commands/datasets/vecs_to_id.py                 50
-rw-r--r--  megapixels/commands/datasets/vecs_to_uuid.py               56
-rw-r--r--  megapixels/commands/datasets/ytmu.py                      205
-rw-r--r--  megapixels/commands/demo/face_analysis.py                  56
-rw-r--r--  megapixels/commands/demo/face_search.py                   100
-rw-r--r--  megapixels/commands/faiss/build_db.py                      15
-rw-r--r--  megapixels/commands/faiss/build_faiss.py                   24
-rw-r--r--  megapixels/commands/faiss/sync_metadata.py                 18
-rw-r--r--  megapixels/commands/misc/compare_sres.py                   59
-rw-r--r--  megapixels/commands/site/build.py                          21
37 files changed, 3398 insertions, 0 deletions
diff --git a/megapixels/commands/admin/rsync.py b/megapixels/commands/admin/rsync.py
new file mode 100644
index 00000000..a821b460
--- /dev/null
+++ b/megapixels/commands/admin/rsync.py
@@ -0,0 +1,106 @@
+"""
+Parallel rsync folders between drives
+For parallel rsync with media records, use vframe/commands/rsync
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'dir_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'dir_out', required=True,
+ help='Output directory')
+@click.option('-t', '--threads', 'opt_threads', default=8,
+ help='Number of threads')
+@click.option('--validate/--no-validate', 'opt_validate', is_flag=True, default=False,
+ help='Validate files after copy')
+@click.option('--extract/--no-extract', 'opt_extract', is_flag=True, default=False,
+ help='Extract files after copy')
+@click.pass_context
+def cli(ctx, dir_in, dir_out, opt_threads, opt_validate, opt_extract):
+ """rsync folders"""
+
+ import os
+ from os.path import join
+ from pathlib import Path
+
+ # NB deactivate logger in imported module
+ import logging
+ logging.getLogger().addHandler(logging.NullHandler())
+ from parallel_sync import rsync
+
+ from app.settings.paths import Paths
+ from app.utils import logger_utils, file_utils
+
+ # -------------------------------------------------
+ # process here
+
+ log = logger_utils.Logger.getLogger()
+ log.info('RSYNC from {} to {}'.format(dir_in, dir_out))
+ log.info('opt_extract: {}'.format(opt_extract))
+ log.info('opt_validate: {}'.format(opt_validate))
+  log.info('opt_threads: {}'.format(opt_threads))
+
+ file_utils.mkdirs(dir_out)
+
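+  # parallel_sync fans the transfer out across `parallelism` worker threads;
+  # validate re-checks files after copy, extract unpacks compressed files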
+ rsync.copy(dir_in, dir_out, parallelism=opt_threads,
+ validate=opt_validate, extract=opt_extract)
+
+ log.info('done rsyncing')
+
+
+ # ---------------------------------------------------------------
+
+
+
+ # if dir_in:
+ # # use input filepath as source
+ # if not Path(dir_in).is_dir():
+ # log.error('{} is not a directory'.format(dir_in))
+ # ctx.exit()
+ # if not Path(dir_out).is_dir():
+ # ctx.log.error('{} is not a directory'.format(dir_out))
+ # return
+
+ # log.info('RSYNC from {} to {}'.format(dir_in, dir_out))
+ # log.debug('opt_validate: {}'.format(opt_validate))
+ # log.debug('opt_extract: {}'.format(opt_extract))
+ # # local_copy(paths, parallelism=10, extract=False, validate=False):
+ # file_utils.mkdirs(dir_out)
+ # rsync.copy(dir_in, dir_out, parallelism=opt_threads,
+ # validate=opt_validate, extract=opt_extract)
+ # else:
+ # log.debug('get paths')
+ # # use source mappings as rsync source
+ # if not opt_media_format:
+ # ctx.log.error('--media format not supplied for source mappings')
+ # return
+
+ # # ensure FILEPATH metadata exists
+ # # parallel-rsync accepts a list of tupes (src, dst)
+ # file_routes = []
+ # for chair_item in chair_items:
+ # item = chair_item.item
+ # sha256 = chair_item.item.sha256
+ # filepath_metadata = item.get_metadata(types.Metadata.FILEPATH)
+ # if not filepath_metadata:
+ # ctx.log.error('no FILEPATH metadata')
+ # return
+ # fp_media =
+ # src = join('')
+ # dir_media = Paths.media_dir(opt_media_format, data_store=opt_disk, verified=ctx.opts['verified'])
+ # dst = join('')
+ # file_routes.append((src, dst))
+
+ # ctx.log.debug('dir_media: {}'.format(dir_media))
+ # return
+
+ # # -------------------------------------------------
+
+ # # send back to sink
+ # for chair_item in chair_items:
+ # sink.send(chair_item)
diff --git a/megapixels/commands/cv/_old_files_to_face_rois.py b/megapixels/commands/cv/_old_files_to_face_rois.py
new file mode 100644
index 00000000..d92cbd74
--- /dev/null
+++ b/megapixels/commands/cv/_old_files_to_face_rois.py
@@ -0,0 +1,168 @@
+"""
+Crop images to prepare for training
+"""
+
+import click
+# from PIL import Image, ImageOps, ImageFilter, ImageDraw
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+color_filters = {'color': 1, 'gray': 2, 'all': 3}
+
+@click.command()
+@click.option('-i', '--input', 'opt_dirs_in', required=True, multiple=True,
+  help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out', required=True,
+ help='Output CSV')
+@click.option('-e', '--ext', 'opt_ext',
+ default='jpg', type=click.Choice(['jpg', 'png']),
+ help='File glob ext')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('-t', '--detector-type', 'opt_detector_type',
+ type=cfg.FaceDetectNetVar,
+ default=click_utils.get_default(types.FaceDetectNet.DLIB_CNN),
+ help=click_utils.show_help(types.FaceDetectNet))
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+ help='GPU index')
+@click.option('--conf', 'opt_conf_thresh', default=0.85, type=click.FloatRange(0,1),
+ help='Confidence minimum threshold')
+@click.option('-p', '--pyramids', 'opt_pyramids', default=0, type=click.IntRange(0,4),
+ help='Number pyramids to upscale for DLIB detectors')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+ help='Display detections to debug')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+ help='Use glob recursion (slower)')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('--color', 'opt_color_filter',
+ type=click.Choice(color_filters.keys()), default='color',
+  help='Filter to keep color or grayscale images (color = keep color)')
+@click.pass_context
+def cli(ctx, opt_dirs_in, opt_fp_out, opt_ext, opt_size, opt_detector_type,
+ opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_recursive, opt_force, opt_color_filter):
+ """Converts frames with faces to CSV of ROIs"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import dlib # must keep a local reference for dlib
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.processors import face_detector
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ if not opt_force and Path(opt_fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwite')
+ return
+
+ if opt_detector_type == types.FaceDetectNet.CVDNN:
+ detector = face_detector.DetectorCVDNN()
+ elif opt_detector_type == types.FaceDetectNet.DLIB_CNN:
+ detector = face_detector.DetectorDLIBCNN(opt_gpu)
+ elif opt_detector_type == types.FaceDetectNet.DLIB_HOG:
+ detector = face_detector.DetectorDLIBHOG()
+ elif opt_detector_type == types.FaceDetectNet.MTCNN:
+ detector = face_detector.DetectorMTCNN()
+ elif opt_detector_type == types.FaceDetectNet.HAAR:
+ log.error('{} not yet implemented'.format(opt_detector_type.name))
+ return
+
+
+ # -------------------------------------------------
+ # process here
+ color_filter = color_filters[opt_color_filter]
+
+ # get list of files to process
+ fp_ims = []
+ for opt_dir_in in opt_dirs_in:
+ if opt_recursive:
+ fp_glob = join(opt_dir_in, '**/*.{}'.format(opt_ext))
+ fp_ims += glob(fp_glob, recursive=True)
+ else:
+ fp_glob = join(opt_dir_in, '*.{}'.format(opt_ext))
+ fp_ims += glob(fp_glob)
+ log.debug(fp_glob)
+
+
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ log.debug('processing {:,} files'.format(len(fp_ims)))
+
+
+ data = []
+
+ for fp_im in tqdm(fp_ims):
+ im = cv.imread(fp_im)
+
+    # filter out color or grayscale images
+    if color_filter != color_filters['all']:
+      try:
+        is_gray = im_utils.is_grayscale(im)
+        if is_gray and color_filter == color_filters['color']:
+          log.debug('Skipping grayscale image: {}'.format(fp_im))
+          continue
+        elif not is_gray and color_filter == color_filters['gray']:
+          log.debug('Skipping color image: {}'.format(fp_im))
+          continue
+ except Exception as e:
+ log.error('Could not check grayscale: {}'.format(fp_im))
+ continue
+
+ try:
+ bboxes = detector.detect(im, opt_size=opt_size, opt_pyramids=opt_pyramids)
+ except Exception as e:
+ log.error('could not detect: {}'.format(fp_im))
+      log.error('{}'.format(e))
+      continue
+ fpp_im = Path(fp_im)
+ subdir = str(fpp_im.parent.relative_to(opt_dir_in))
+
+ for bbox in bboxes:
+ # log.debug('is square: {}'.format(bbox.w == bbox.h))
+ nw,nh = int(bbox.w * im.shape[1]), int(bbox.h * im.shape[0])
+ roi = {
+ 'fn': fpp_im.stem,
+ 'ext': fpp_im.suffix.replace('.',''),
+ 'x': bbox.x,
+ 'y': bbox.y,
+ 'w': bbox.w,
+ 'h': bbox.h,
+ 'image_height': im.shape[0],
+ 'image_width': im.shape[1],
+ 'subdir': subdir}
+ bbox_dim = bbox.to_dim(im.shape[:2][::-1]) # w,h
+ data.append(roi)
+
+ # debug display
+ if opt_display and len(bboxes):
+ im_md = im_utils.resize(im, width=min(1200, opt_size[0]))
+ for bbox in bboxes:
+ bbox_dim = bbox.to_dim(im_md.shape[:2][::-1])
+ cv.rectangle(im_md, bbox_dim.pt_tl, bbox_dim.pt_br, (0,255,0), 3)
+ cv.imshow('', im_md)
+ while True:
+ k = cv.waitKey(1) & 0xFF
+ if k == 27 or k == ord('q'): # ESC
+ cv.destroyAllWindows()
+ sys.exit()
+ elif k != 255:
+ # any key to continue
+ break
+
+  # save data
+ file_utils.mkdirs(opt_fp_out)
+ df = pd.DataFrame.from_dict(data)
+ df.to_csv(opt_fp_out, index=False) \ No newline at end of file
diff --git a/megapixels/commands/cv/cluster.py b/megapixels/commands/cv/cluster.py
new file mode 100644
index 00000000..419091a0
--- /dev/null
+++ b/megapixels/commands/cv/cluster.py
@@ -0,0 +1,47 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+@click.command()
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--metadata', 'opt_metadata', required=True,
+ type=cfg.MetadataVar,
+ show_default=True,
+ help=click_utils.show_help(types.Metadata))
+@click.pass_context
+def cli(ctx, opt_data_store, opt_dataset, opt_metadata):
+  """Cluster face embeddings"""
+
+  import sys
+
+  import numpy as np
+  import cv2 as cv
+  from sklearn.cluster import DBSCAN
+
+  log = Logger.getLogger()
+
+  # TODO: load the face embeddings for opt_dataset into `encodings`
+  encodings = []
+
+  # cluster the embeddings
+  log.info('clustering...')
+  clt = DBSCAN(metric='euclidean', n_jobs=-1)
+  clt.fit(encodings)
+
+  # determine the total number of unique faces found in the dataset
+  # (DBSCAN labels outliers -1, so count only labels > -1)
+  label_ids = np.unique(clt.labels_)
+  num_unique_faces = len(np.where(label_ids > -1)[0])
+  log.info('# unique faces: {}'.format(num_unique_faces))
+
+  # TODO: load and display a representative image per cluster
+  fp_im = ''  # placeholder filepath
+  im = cv.imread(fp_im)
+  cv.imshow('', im)
+
+  while True:
+    k = cv.waitKey(1) & 0xFF
+    if k == 27 or k == ord('q'):  # ESC
+      cv.destroyAllWindows()
+      sys.exit()
+    elif k != 255:
+      # any key to continue
+      break \ No newline at end of file
diff --git a/megapixels/commands/cv/crop.py b/megapixels/commands/cv/crop.py
new file mode 100644
index 00000000..778be0c4
--- /dev/null
+++ b/megapixels/commands/cv/crop.py
@@ -0,0 +1,104 @@
+"""
+Crop images to prepare for training
+"""
+
+import click
+from PIL import Image, ImageOps, ImageFilter, ImageDraw
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_dir_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+ help='Output directory')
+@click.option('-e', '--ext', 'opt_ext',
+ default='jpg', type=click.Choice(['jpg', 'png']),
+ help='File glob ext')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(256, 256),
+ help='Output image size')
+@click.option('-t', '--crop-type', 'opt_crop_type',
+ default='center', type=click.Choice(['center', 'mirror', 'face', 'person', 'none']),
+  help='Crop type')
+@click.pass_context
+def cli(ctx, opt_dir_in, opt_dir_out, opt_ext, opt_size, opt_crop_type):
+ """Crop, mirror images"""
+
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+ from tqdm import tqdm
+
+
+ from app.utils import logger_utils, file_utils, im_utils
+
+ # -------------------------------------------------
+ # process here
+
+ log = logger_utils.Logger.getLogger()
+ log.info('crop images')
+
+ # get list of files to process
+ fp_ims = glob(join(opt_dir_in, '*.{}'.format(opt_ext)))
+ log.debug('files: {}'.format(len(fp_ims)))
+
+ # ensure output dir exists
+ file_utils.mkdirs(opt_dir_out)
+
+ for fp_im in tqdm(fp_ims):
+ im = process_crop(fp_im, opt_size, opt_crop_type)
+ fp_out = join(opt_dir_out, Path(fp_im).name)
+ im.save(fp_out)
+
+
+def process_crop(fp_im, opt_size, crop_type):
+ im = Image.open(fp_im)
+ if crop_type == 'center':
+ im = crop_square_fit(im, opt_size)
+ elif crop_type == 'mirror':
+ im = mirror_crop_square(im, opt_size)
+ return im
+
+def crop_square_fit(im, size, center=(0.5, 0.5)):
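+  # ImageOps.fit crops to the target aspect ratio around the fractional (x, y)
+  # centering point, then resizes to `size`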
+ return ImageOps.fit(im, size, method=Image.BICUBIC, centering=center)
+
+def mirror_crop_square(im, size):
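+  # pad the short axis with mirrored edge strips so the result is square
+  # without stretching the source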
+ # force to even dims
+ if im.size[0] % 2 or im.size[1] % 2:
+ im = ImageOps.fit(im, ((im.size[0] // 2) * 2, (im.size[1] // 2) * 2))
+
+ # create new square image
+ min_size, max_size = (min(im.size), max(im.size))
+ orig_w, orig_h = im.size
+ margin = (max_size - min_size) // 2
+ w, h = (max_size, max_size)
+ im_new = Image.new('RGB', (w, h), color=(0, 0, 0))
+
+ #crop (l, t, r, b)
+  if orig_w > orig_h:
+    # landscape: mirror-expand top/bottom with vertically flipped edge strips
+    im_top = ImageOps.flip(im.crop((0, 0, orig_w, margin)))
+    im_bot = ImageOps.flip(im.crop((0, orig_h - margin, orig_w, orig_h)))
+    im_new.paste(im_top, (0, 0))
+    im_new.paste(im, (0, margin))
+    im_new.paste(im_bot, (0, margin + orig_h))
+  elif orig_h > orig_w:
+    # portrait: mirror-expand left/right with horizontally mirrored edge strips
+    im_left = ImageOps.mirror(im.crop((0, 0, margin, orig_h)))
+    im_right = ImageOps.mirror(im.crop((orig_w - margin, 0, orig_w, orig_h)))
+    im_new.paste(im_left, (0, 0))
+    im_new.paste(im, (margin, 0))
+    im_new.paste(im_right, (margin + orig_w, 0))
+
+ return im_new.resize(size)
+
+
+def center_crop_face():
+ pass
+
+def center_crop_person():
+ pass \ No newline at end of file
diff --git a/megapixels/commands/cv/csv_to_faces.py b/megapixels/commands/cv/csv_to_faces.py
new file mode 100644
index 00000000..64c8b965
--- /dev/null
+++ b/megapixels/commands/cv/csv_to_faces.py
@@ -0,0 +1,105 @@
+"""
+Reads in CSV of ROIs and extracts facial regions with padding
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input CSV')
+@click.option('-m', '--media', 'opt_dir_media', required=True,
+ help='Input image/video directory')
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+ help='Output directory for extracted ROI images')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('--padding', 'opt_padding', default=0.25,
+ help='Facial padding as percentage of face width')
+@click.option('--ext', 'opt_ext_out', default='png', type=click.Choice(['jpg', 'png']),
+ help='Output image type')
+@click.option('--min', 'opt_min', default=(60, 60),
+ help='Minimum original face size')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_dir_media, opt_dir_out, opt_slice,
+ opt_padding, opt_ext_out, opt_min):
+ """Converts ROIs to images"""
+
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ from PIL import Image, ImageOps, ImageFilter, ImageDraw
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.models.bbox import BBox
+
+ # -------------------------------------------------
+ # process here
+ log = logger_utils.Logger.getLogger()
+
+ df_rois = pd.read_csv(opt_fp_in, dtype={'subdir': str, 'fn': str})
+ if opt_slice:
+ df_rois = df_rois[opt_slice[0]:opt_slice[1]]
+
+ log.info('Processing {:,} rows'.format(len(df_rois)))
+
+ file_utils.mkdirs(opt_dir_out)
+
+ df_rois_grouped = df_rois.groupby(['fn']) # group by fn/filename
+ groups = df_rois_grouped.groups
+ skipped = []
+
+ for group in tqdm(groups):
+ # get image
+ group_rows = df_rois_grouped.get_group(group)
+
+ row = group_rows.iloc[0]
+ fp_im = join(opt_dir_media, str(row['subdir']), '{fn}.{ext}'.format(**row)) # TODO change to ext
+ try:
+ im = Image.open(fp_im).convert('RGB')
+ im.verify()
+ except Exception as e:
+ log.warn('Could not open: {}'.format(fp_im))
+ log.error(e)
+ continue
+
+ for idx, roi in group_rows.iterrows():
+ # get bbox to im dimensions
+ xywh = [roi['x'], roi['y'], roi['w'] , roi['h']]
+ bbox = BBox.from_xywh(*xywh)
+ dim = im.size
+ bbox_dim = bbox.to_dim(dim)
+ # expand
+ opt_padding_px = int(opt_padding * bbox_dim.width)
+ bbox_dim_exp = bbox_dim.expand_dim(opt_padding_px, dim)
+ # crop
+ x1y2 = bbox_dim_exp.pt_tl + bbox_dim_exp.pt_br
+ im_crop = im.crop(box=x1y2)
+
+ # strip exif, create new image and paste data
+ im_crop_data = list(im_crop.getdata())
+ im_crop_no_exif = Image.new(im_crop.mode, im_crop.size)
+ im_crop_no_exif.putdata(im_crop_data)
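+      # rebuilding from raw pixels guarantees no EXIF/ICC metadata survives in the crop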
+
+ # save
+ idx_zpad = file_utils.zpad(idx, zeros=3)
+ subdir = '' if roi['subdir'] == '.' else '{}_'.format(roi['subdir'])
+ subdir = subdir.replace('/', '_')
+ fp_im_out = join(opt_dir_out, '{}{}{}.{}'.format(subdir, roi['fn'], idx_zpad, opt_ext_out))
+ # threshold size and save
+ if im_crop_no_exif.size[0] < opt_min[0] or im_crop_no_exif.size[1] < opt_min[1]:
+ skipped.append(fp_im_out)
+ log.info('Face too small: {}, idx: {}'.format(fp_im, idx))
+ else:
+ im_crop_no_exif.save(fp_im_out)
+
+ log.info('Skipped {:,} images'.format(len(skipped)))
diff --git a/megapixels/commands/cv/csv_to_faces_mt.py b/megapixels/commands/cv/csv_to_faces_mt.py
new file mode 100644
index 00000000..64c8b965
--- /dev/null
+++ b/megapixels/commands/cv/csv_to_faces_mt.py
@@ -0,0 +1,105 @@
+"""
+Reads in CSV of ROIs and extracts facial regions with padding
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input CSV')
+@click.option('-m', '--media', 'opt_dir_media', required=True,
+ help='Input image/video directory')
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+ help='Output directory for extracted ROI images')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('--padding', 'opt_padding', default=0.25,
+ help='Facial padding as percentage of face width')
+@click.option('--ext', 'opt_ext_out', default='png', type=click.Choice(['jpg', 'png']),
+ help='Output image type')
+@click.option('--min', 'opt_min', default=(60, 60),
+ help='Minimum original face size')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_dir_media, opt_dir_out, opt_slice,
+ opt_padding, opt_ext_out, opt_min):
+ """Converts ROIs to images"""
+
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ from PIL import Image, ImageOps, ImageFilter, ImageDraw
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.models.bbox import BBox
+
+ # -------------------------------------------------
+ # process here
+ log = logger_utils.Logger.getLogger()
+
+ df_rois = pd.read_csv(opt_fp_in, dtype={'subdir': str, 'fn': str})
+ if opt_slice:
+ df_rois = df_rois[opt_slice[0]:opt_slice[1]]
+
+ log.info('Processing {:,} rows'.format(len(df_rois)))
+
+ file_utils.mkdirs(opt_dir_out)
+
+ df_rois_grouped = df_rois.groupby(['fn']) # group by fn/filename
+ groups = df_rois_grouped.groups
+ skipped = []
+
+ for group in tqdm(groups):
+ # get image
+ group_rows = df_rois_grouped.get_group(group)
+
+ row = group_rows.iloc[0]
+ fp_im = join(opt_dir_media, str(row['subdir']), '{fn}.{ext}'.format(**row)) # TODO change to ext
+ try:
+ im = Image.open(fp_im).convert('RGB')
+ im.verify()
+ except Exception as e:
+ log.warn('Could not open: {}'.format(fp_im))
+ log.error(e)
+ continue
+
+ for idx, roi in group_rows.iterrows():
+ # get bbox to im dimensions
+ xywh = [roi['x'], roi['y'], roi['w'] , roi['h']]
+ bbox = BBox.from_xywh(*xywh)
+ dim = im.size
+ bbox_dim = bbox.to_dim(dim)
+ # expand
+ opt_padding_px = int(opt_padding * bbox_dim.width)
+ bbox_dim_exp = bbox_dim.expand_dim(opt_padding_px, dim)
+ # crop
+ x1y2 = bbox_dim_exp.pt_tl + bbox_dim_exp.pt_br
+ im_crop = im.crop(box=x1y2)
+
+ # strip exif, create new image and paste data
+ im_crop_data = list(im_crop.getdata())
+ im_crop_no_exif = Image.new(im_crop.mode, im_crop.size)
+ im_crop_no_exif.putdata(im_crop_data)
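+      # rebuilding from raw pixels guarantees no EXIF/ICC metadata survives in the crop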
+
+ # save
+ idx_zpad = file_utils.zpad(idx, zeros=3)
+ subdir = '' if roi['subdir'] == '.' else '{}_'.format(roi['subdir'])
+ subdir = subdir.replace('/', '_')
+ fp_im_out = join(opt_dir_out, '{}{}{}.{}'.format(subdir, roi['fn'], idx_zpad, opt_ext_out))
+ # threshold size and save
+ if im_crop_no_exif.size[0] < opt_min[0] or im_crop_no_exif.size[1] < opt_min[1]:
+ skipped.append(fp_im_out)
+ log.info('Face too small: {}, idx: {}'.format(fp_im, idx))
+ else:
+ im_crop_no_exif.save(fp_im_out)
+
+ log.info('Skipped {:,} images'.format(len(skipped)))
diff --git a/megapixels/commands/cv/face_frames.py b/megapixels/commands/cv/face_frames.py
new file mode 100644
index 00000000..76f23af1
--- /dev/null
+++ b/megapixels/commands/cv/face_frames.py
@@ -0,0 +1,82 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory to glob')
+@click.option('-o', '--output', 'opt_fp_out', required=True,
+ help='Output directory for face frames')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_size, opt_slice):
+ """Split video to face frames"""
+
+ from tqdm import tqdm
+ import dlib
+ import pandas as pd
+ from PIL import Image, ImageOps, ImageFilter
+ import cv2 as cv
+ import numpy as np
+
+ from app.processors import face_detector
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.settings import types
+ from app.utils import click_utils
+ from app.settings import app_cfg as cfg
+ from app.models.bbox import BBox
+
+ log = logger_utils.Logger.getLogger()
+
+ # -------------------------------------------------
+ # process
+
+ detector = face_detector.DetectorDLIBCNN()
+
+ # get file list
+ fp_videos = glob(join(opt_fp_in, '*.mp4'))
+ fp_videos += glob(join(opt_fp_in, '*.webm'))
+ fp_videos += glob(join(opt_fp_in, '*.mkv'))
+
+ min_distance_per = .025 # minimum distance percentage to save new face image
+ face_interval = 5
+ frame_interval_count = 0
+ frame_count = 0
+ bbox_prev = BBox(0,0,0,0)
+ file_utils.mkdirs(opt_fp_out)
+ dnn_size = opt_size
+ max_dim = max(dnn_size)
+ px_thresh = int(max_dim * min_distance_per)
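+  # a face must move at least px_thresh pixels from the last saved bbox
+  # before another frame is written, de-duplicating near-identical frames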
+
+ for fp_video in tqdm(fp_videos):
+ # load video
+ video = cv.VideoCapture(fp_video)
+ # iterate through frames
+ while video.isOpened():
+ res, frame = video.read()
+ if not res:
+ break
+ # increment frames, save frame if interval has passed
+ frame_count += 1 # for naming
+ frame_interval_count += 1 # for interval
+ bboxes = detector.detect(frame, opt_size=dnn_size, opt_pyramids=0)
+ if len(bboxes) > 0 and frame_interval_count >= face_interval:
+ dim = frame.shape[:2][::-1]
+ d = bboxes[0].to_dim(dim).distance(bbox_prev)
+ if d > px_thresh:
+ # save frame
+ zfc = file_utils.zpad(frame_count)
+ fp_frame = join(opt_fp_out, '{}_{}.jpg'.format(Path(fp_video).stem, zfc))
+ cv.imwrite(fp_frame, frame)
+ frame_interval_count = 0
+        bbox_prev = bboxes[0].to_dim(dim) # keep pixel coords to match the distance check
diff --git a/megapixels/commands/cv/face_landmarks_3d.py b/megapixels/commands/cv/face_landmarks_3d.py
new file mode 100644
index 00000000..03ef8fc2
--- /dev/null
+++ b/megapixels/commands/cv/face_landmarks_3d.py
@@ -0,0 +1,96 @@
+"""
+
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+color_filters = {'color': 1, 'gray': 2, 'all': 3}
+
+@click.command()
+@click.option('-i', '--input', 'opt_dirs_in', required=True, multiple=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out', required=True,
+ help='Output CSV')
+@click.option('-e', '--ext', 'opt_ext',
+ default='jpg', type=click.Choice(['jpg', 'png']),
+ help='File glob ext')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+ help='GPU index')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+ help='Use glob recursion (slower)')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_dirs_in, opt_fp_out, opt_ext, opt_size, opt_gpu, opt_slice,
+ opt_recursive, opt_force):
+ """Converts face imges to 3D landmarks"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import dlib # must keep a local reference for dlib
+ import cv2 as cv
+ import pandas as pd
+ from face_alignment import FaceAlignment, LandmarksType
+ from skimage import io
+
+ from app.utils import logger_utils, file_utils
+ from app.processors import face_detector
+
+ # -------------------------------------------------
+ # init here
+
+
+ log = logger_utils.Logger.getLogger()
+
+ if not opt_force and Path(opt_fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwite')
+ return
+
+ device = 'cuda' if opt_gpu > -1 else 'cpu'
+ fa = FaceAlignment(LandmarksType._3D, flip_input=False, device=device)
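+  # LandmarksType._3D returns 68 (x, y, z) points per face; flip_input=False
+  # skips the flipped-image averaging pass for speed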
+
+ # get list of files to process
+ fp_ims = []
+ for opt_dir_in in opt_dirs_in:
+ if opt_recursive:
+ fp_glob = join(opt_dir_in, '**/*.{}'.format(opt_ext))
+ fp_ims += glob(fp_glob, recursive=True)
+ else:
+ fp_glob = join(opt_dir_in, '*.{}'.format(opt_ext))
+ fp_ims += glob(fp_glob)
+ log.debug(fp_glob)
+
+
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ log.debug('processing {:,} files'.format(len(fp_ims)))
+
+
+ data = {}
+
+ for fp_im in tqdm(fp_ims):
+ fpp_im = Path(fp_im)
+ im = io.imread(fp_im)
+ preds = fa.get_landmarks(im)
+ if preds and len(preds) > 0:
+ data[fpp_im.name] = preds[0].tolist()
+
+  # save data
+ file_utils.mkdirs(opt_fp_out)
+
+ file_utils.write_json(data, opt_fp_out, verbose=True) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_pose.py b/megapixels/commands/cv/face_pose.py
new file mode 100644
index 00000000..c37d006f
--- /dev/null
+++ b/megapixels/commands/cv/face_pose.py
@@ -0,0 +1,140 @@
+"""
+Converts ROIs to pose: yaw, roll, pitch
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('-d', '--display', 'opt_display', is_flag=True,
+ help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
+ opt_slice, opt_force, opt_display):
+ """Converts ROIs to pose: roll, yaw, pitch"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import dlib # must keep a local reference for dlib
+ import cv2 as cv
+ import pandas as pd
+
+ from app.models.bbox import BBox
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.processors.face_landmarks import LandmarksDLIB
+ from app.processors.face_pose import FacePoseDLIB
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FACE_POSE) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwite')
+ return
+
+ # init face processors
+ face_pose = FacePoseDLIB()
+ face_landmarks = LandmarksDLIB()
+
+ # load filepath data
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+ # load ROI data
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # slice if you want
+ if opt_slice:
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+ # group by image index (speedup if multiple faces per image)
+ df_img_groups = df_roi.groupby('record_index')
+ log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+ # store poses and convert to DataFrame
+ poses = []
+
+ # iterate
+ for record_index, df_img_group in tqdm(df_img_groups):
+ # make fp
+ ds_record = df_record.iloc[record_index]
+ fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ im = cv.imread(fp_im)
+ # get bbox
+ x = df_img_group.x.values[0]
+ y = df_img_group.y.values[0]
+ w = df_img_group.w.values[0]
+ h = df_img_group.h.values[0]
+ dim = im.shape[:2][::-1]
+ bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+ # get pose
+ landmarks = face_landmarks.landmarks(im, bbox)
+ pose_data = face_pose.pose(landmarks, dim, project_points=opt_display)
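+    # pose() is assumed to fit the landmarks to a canonical 3D head model
+    # (PnP-style); 'degrees' carries the resulting roll/yaw/pitch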
+ pose_degrees = pose_data['degrees'] # only keep the degrees data
+
+ # use the project point data if display flag set
+ if opt_display:
+ pts_im = pose_data['points_image']
+ pts_model = pose_data['points_model']
+ pt_nose = pose_data['point_nose']
+ dst = im.copy()
+ face_pose.draw_pose(dst, pts_im, pts_model, pt_nose)
+ face_pose.draw_degrees(dst, pose_degrees)
+ # display to cv window
+ cv.imshow('', dst)
+ while True:
+ k = cv.waitKey(1) & 0xFF
+ if k == 27 or k == ord('q'): # ESC
+ cv.destroyAllWindows()
+ sys.exit()
+ elif k != 255:
+ # any key to continue
+ break
+
+ # add image index and append to result CSV data
+ pose_degrees['record_index'] = record_index
+ poses.append(pose_degrees)
+
+
+  # save data
+ file_utils.mkdirs(fp_out)
+ df = pd.DataFrame.from_dict(poses)
+ df.index.name = 'index'
+ df.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_roi.py b/megapixels/commands/cv/face_roi.py
new file mode 100644
index 00000000..a08566a8
--- /dev/null
+++ b/megapixels/commands/cv/face_roi.py
@@ -0,0 +1,171 @@
+"""
+Detect faces in frames and export ROIs to CSV
+"""
+
+import click
+# from PIL import Image, ImageOps, ImageFilter, ImageDraw
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+color_filters = {'color': 1, 'gray': 2, 'all': 3}
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('-t', '--detector-type', 'opt_detector_type',
+ type=cfg.FaceDetectNetVar,
+ default=click_utils.get_default(types.FaceDetectNet.DLIB_CNN),
+ help=click_utils.show_help(types.FaceDetectNet))
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+ help='GPU index')
+@click.option('--conf', 'opt_conf_thresh', default=0.85, type=click.FloatRange(0,1),
+ help='Confidence minimum threshold')
+@click.option('-p', '--pyramids', 'opt_pyramids', default=0, type=click.IntRange(0,4),
+ help='Number pyramids to upscale for DLIB detectors')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+ help='Display detections to debug')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('--color', 'opt_color_filter',
+ type=click.Choice(color_filters.keys()), default='all',
+  help='Filter to keep color or grayscale images (color = keep color)')
+@click.option('--largest/--all-faces', 'opt_largest', is_flag=True, default=True,
+ help='Only keep largest face')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset, opt_size, opt_detector_type,
+ opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_force, opt_color_filter,
+ opt_largest):
+ """Converts frames with faces to CSV of ROIs"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import dlib # must keep a local reference for dlib
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.processors import face_detector
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FACE_ROI) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwite')
+ return
+
+ # set detector
+ if opt_detector_type == types.FaceDetectNet.CVDNN:
+ detector = face_detector.DetectorCVDNN()
+ elif opt_detector_type == types.FaceDetectNet.DLIB_CNN:
+ detector = face_detector.DetectorDLIBCNN(opt_gpu)
+ elif opt_detector_type == types.FaceDetectNet.DLIB_HOG:
+ detector = face_detector.DetectorDLIBHOG()
+ elif opt_detector_type == types.FaceDetectNet.MTCNN:
+ detector = face_detector.DetectorMTCNN()
+ elif opt_detector_type == types.FaceDetectNet.HAAR:
+ log.error('{} not yet implemented'.format(opt_detector_type.name))
+ return
+
+
+ # get list of files to process
+ fp_in = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
+ df_records = pd.read_csv(fp_in).set_index('index')
+ if opt_slice:
+ df_records = df_records[opt_slice[0]:opt_slice[1]]
+ log.debug('processing {:,} files'.format(len(df_records)))
+
+ # filter out grayscale
+ color_filter = color_filters[opt_color_filter]
+
+ data = []
+
+ for df_record in tqdm(df_records.itertuples(), total=len(df_records)):
+ fp_im = data_store.face(str(df_record.subdir), str(df_record.fn), str(df_record.ext))
+ im = cv.imread(fp_im)
+
+    # filter out color or grayscale images
+    if color_filter != color_filters['all']:
+      try:
+        is_gray = im_utils.is_grayscale(im)
+        if is_gray and color_filter == color_filters['color']:
+          log.debug('Skipping grayscale image: {}'.format(fp_im))
+          continue
+        elif not is_gray and color_filter == color_filters['gray']:
+          log.debug('Skipping color image: {}'.format(fp_im))
+          continue
+ except Exception as e:
+ log.error('Could not check grayscale: {}'.format(fp_im))
+ continue
+
+ try:
+ bboxes = detector.detect(im, size=opt_size, pyramids=opt_pyramids, largest=opt_largest)
+ except Exception as e:
+ log.error('could not detect: {}'.format(fp_im))
+ log.error('{}'.format(e))
+ continue
+
+ for bbox in bboxes:
+ roi = {
+ 'record_index': int(df_record.Index),
+ 'x': bbox.x,
+ 'y': bbox.y,
+ 'w': bbox.w,
+ 'h': bbox.h,
+ 'image_width': im.shape[1],
+ 'image_height': im.shape[0]}
+ data.append(roi)
+
+ # debug display
+ if opt_display and len(bboxes):
+ im_md = im_utils.resize(im, width=min(1200, opt_size[0]))
+ for bbox in bboxes:
+ bbox_dim = bbox.to_dim(im_md.shape[:2][::-1])
+ log.debug(f'bbox: {bbox_dim}')
+ cv.rectangle(im_md, bbox_dim.pt_tl, bbox_dim.pt_br, (0,255,0), 3)
+ cv.imshow('', im_md)
+ while True:
+ k = cv.waitKey(1) & 0xFF
+ if k == 27 or k == ord('q'): # ESC
+ cv.destroyAllWindows()
+ sys.exit()
+ elif k != 255:
+ # any key to continue
+ break
+
+  # save data
+ file_utils.mkdirs(fp_out)
+ df = pd.DataFrame.from_dict(data)
+ df.index.name = 'index'
+ df.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_vector.py b/megapixels/commands/cv/face_vector.py
new file mode 100644
index 00000000..7200d73b
--- /dev/null
+++ b/megapixels/commands/cv/face_vector.py
@@ -0,0 +1,125 @@
+"""
+Converts ROIs to face vector
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('-j', '--jitters', 'opt_jitters', default=cfg.DLIB_FACEREC_JITTERS,
+ help='Number of jitters')
+@click.option('-p', '--padding', 'opt_padding', default=cfg.DLIB_FACEREC_PADDING,
+ help='Percentage padding')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+ help='GPU index')
+@click.pass_context
+def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
+ opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding):
+ """Converts face ROIs to vectors"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import dlib # must keep a local reference for dlib
+ import cv2 as cv
+ import pandas as pd
+
+ from app.models.bbox import BBox
+ from app.models.data_store import DataStore
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.processors import face_recognition
+
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FACE_VECTOR) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwite')
+ return
+
+ # init face processors
+ facerec = face_recognition.RecognitionDLIB()
+
+ # load data
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+
+ if opt_slice:
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+
+ # -------------------------------------------------
+ # process here
+ df_img_groups = df_roi.groupby('record_index')
+ log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+ vecs = []
+
+ for image_index, df_img_group in tqdm(df_img_groups):
+ # make fp
+ roi_index = df_img_group.index.values[0]
+ # log.debug(f'roi_index: {roi_index}, image_index: {image_index}')
+    ds_file = df_record.loc[image_index] # locate image meta by record index
+
+ fp_im = data_store.face(str(ds_file.subdir), str(ds_file.fn), str(ds_file.ext))
+ im = cv.imread(fp_im)
+ # get bbox
+ x = df_img_group.x.values[0]
+ y = df_img_group.y.values[0]
+ w = df_img_group.w.values[0]
+ h = df_img_group.h.values[0]
+ imw = df_img_group.image_width.values[0]
+ imh = df_img_group.image_height.values[0]
+    # get face vector
+    dim = (imw, imh) # use the recorded image dims
+ bbox_dim = BBox.from_xywh(x, y, w, h).to_dim(dim) # convert to int real dimensions
+ # compute vec
+ # padding=opt_padding not yet implemented in 19.16 but merged in master
+ vec = facerec.vec(im, bbox_dim, jitters=opt_jitters)
+ vec_str = ','.join([repr(x) for x in vec]) # convert to string for CSV
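+    # repr() preserves full float precision so vectors round-trip losslessly through CSV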
+ vecs.append( {'roi_index': roi_index, 'record_index': image_index, 'vec': vec_str})
+
+
+  # save data
+ df = pd.DataFrame.from_dict(vecs)
+ df.index.name = 'index'
+ file_utils.mkdirs(fp_out)
+ df.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/cv/mirror.py b/megapixels/commands/cv/mirror.py
new file mode 100644
index 00000000..9ca1cac7
--- /dev/null
+++ b/megapixels/commands/cv/mirror.py
@@ -0,0 +1,57 @@
+"""
+Mirror-augment images in a directory
+"""
+
+import click
+import cv2 as cv
+from PIL import Image, ImageOps, ImageFilter
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_dir_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+ help='Output directory')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice the input list')
+@click.pass_context
+def cli(ctx, opt_dir_in, opt_dir_out, opt_slice):
+ """Mirror augment image directory"""
+
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+ from tqdm import tqdm
+
+ from app.utils import logger_utils, file_utils, im_utils
+
+ # -------------------------------------------------
+ # init
+
+ log = logger_utils.Logger.getLogger()
+
+ # -------------------------------------------------
+ # process here
+
+ # get list of files to process
+ fp_ims = glob(join(opt_dir_in, '*.jpg'))
+ fp_ims += glob(join(opt_dir_in, '*.png'))
+
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ log.info('processing {:,} files'.format(len(fp_ims)))
+
+ # ensure output dir exists
+ file_utils.mkdirs(opt_dir_out)
+
+ # resize and save images
+ for fp_im in tqdm(fp_ims):
+    im = ImageOps.mirror(Image.open(fp_im))
+ fpp_im = Path(fp_im)
+ fp_out = join(opt_dir_out, '{}_mirror{}'.format(fpp_im.stem, fpp_im.suffix))
+ im.save(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/cv/resize.py b/megapixels/commands/cv/resize.py
new file mode 100644
index 00000000..dcd621b3
--- /dev/null
+++ b/megapixels/commands/cv/resize.py
@@ -0,0 +1,149 @@
+"""
+Resize images to prepare for training
+"""
+
+import click
+import cv2 as cv
+from PIL import Image, ImageOps, ImageFilter
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+"""
+Filter     Q-Down   Q-Up    Speed
+NEAREST    -        -       ⭐⭐⭐⭐⭐
+BOX        ⭐        -       ⭐⭐⭐⭐
+BILINEAR   ⭐        ⭐       ⭐⭐⭐
+HAMMING    ⭐⭐       -       ⭐⭐⭐
+BICUBIC    ⭐⭐⭐      ⭐⭐⭐     ⭐⭐
+LANCZOS    ⭐⭐⭐⭐     ⭐⭐⭐⭐    ⭐
+"""
+methods = {
+ 'lanczos': Image.LANCZOS,
+ 'bicubic': Image.BICUBIC,
+ 'hamming': Image.HAMMING,
+  'bilinear': Image.BILINEAR,
+ 'box': Image.BOX,
+ 'nearest': Image.NEAREST
+ }
+centerings = {
+ 'tl': (0.0, 0.0),
+ 'tc': (0.5, 0.0),
+  'tr': (1.0, 0.0),
+ 'lc': (0.0, 0.5),
+ 'cc': (0.5, 0.5),
+ 'rc': (1.0, 0.5),
+ 'bl': (0.0, 1.0),
+  'bc': (0.5, 1.0),
+ 'br': (1.0, 1.0)
+}
+
+@click.command()
+@click.option('-i', '--input', 'opt_dir_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+ help='Output directory')
+@click.option('-e', '--ext', 'opt_glob_ext',
+ default='png', type=click.Choice(['jpg', 'png']),
+ help='File glob ext')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(256, 256),
+ help='Output image size (square)')
+@click.option('--method', 'opt_scale_method',
+ type=click.Choice(methods.keys()),
+ default='lanczos',
+ help='Scaling method to use')
+@click.option('--equalize', 'opt_equalize', is_flag=True,
+  help='Equalize histogram')
+@click.option('--sharpen', 'opt_sharpen', is_flag=True,
+ help='Unsharp mask')
+@click.option('--center', 'opt_center', default='cc', type=click.Choice(centerings.keys()),
+ help='Crop focal point')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice the input list')
+@click.option('-t', '--threads', 'opt_threads', default=8,
+ help='Number of threads')
+@click.pass_context
+def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method,
+ opt_equalize, opt_sharpen, opt_center, opt_slice, opt_threads):
+ """Crop, mirror images"""
+
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+ from tqdm import tqdm
+ from multiprocessing.dummy import Pool as ThreadPool
+ from functools import partial
+
+ from app.utils import logger_utils, file_utils, im_utils
+
+ # -------------------------------------------------
+ # init
+
+ log = logger_utils.Logger.getLogger()
+
+
+ # -------------------------------------------------
+ # process here
+
+ def pool_resize(fp_im, opt_size, scale_method, centering):
+ # Threaded image resize function
+ try:
+ pbar.update(1)
+ try:
+ im = Image.open(fp_im).convert('RGB')
+ im.verify()
+ except Exception as e:
+ log.warn('Could not open: {}'.format(fp_im))
+ log.error(e)
+ return False
+
+ im = ImageOps.fit(im, opt_size, method=scale_method, centering=centering)
+
+ if opt_equalize:
+ im_np = im_utils.pil2np(im)
+ im_np_eq = eq_hist_yuv(im_np)
+ im_np = cv.addWeighted(im_np_eq, 0.35, im_np, 0.65, 0)
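+        # blend 35% equalized with 65% original to temper the contrast boost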
+ im = im_utils.np2pil(im_np)
+
+ if opt_sharpen:
+ im = im.filter(ImageFilter.UnsharpMask)
+
+ fp_out = join(opt_dir_out, Path(fp_im).name)
+ im.save(fp_out)
+ return True
+    except Exception as e:
+      log.error('failed: {}, error: {}'.format(fp_im, e))
+      return False
+
+ centering = centerings[opt_center]
+ scale_method = methods[opt_scale_method]
+
+ # get list of files to process
+ fp_ims = glob(join(opt_dir_in, '*.{}'.format(opt_glob_ext)))
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ log.info('processing {:,} files'.format(len(fp_ims)))
+
+
+ # ensure output dir exists
+ file_utils.mkdirs(opt_dir_out)
+
+  # setup multithreading
+  pool_resize = partial(pool_resize, opt_size=opt_size, scale_method=scale_method, centering=centering)
+  pool = ThreadPool(opt_threads)
+  with tqdm(total=len(fp_ims)) as pbar:
+    results = pool.map(pool_resize, fp_ims)
+
+ log.info('Resized: {} / {} images'.format(results.count(True), len(fp_ims)))
+
+
+
+def eq_hist_yuv(im):
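+  # equalize only the luma (Y) channel so colors are preserved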
+ im_yuv = cv.cvtColor(im, cv.COLOR_BGR2YUV)
+ im_yuv[:,:,0] = cv.equalizeHist(im_yuv[:,:,0])
+ return cv.cvtColor(im_yuv, cv.COLOR_YUV2BGR)
diff --git a/megapixels/commands/cv/videos_to_frames.py b/megapixels/commands/cv/videos_to_frames.py
new file mode 100644
index 00000000..0b56c46a
--- /dev/null
+++ b/megapixels/commands/cv/videos_to_frames.py
@@ -0,0 +1,73 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+
+import dlib
+import pandas as pd
+from PIL import Image, ImageOps, ImageFilter
+from app.utils import file_utils, im_utils
+
+
+log = logger_utils.Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out', required=True,
+ help='Output directory')
+@click.option('--size', 'opt_size', default=(320, 240),
+  help='Inference size for face detection')
+@click.option('--interval', 'opt_frame_interval', default=20,
+ help='Number of frames before saving next face')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_size, opt_frame_interval):
+ """Converts videos to frames with faces"""
+
+ # -------------------------------------------------
+ # process
+
+  from tqdm import tqdm
+  import cv2 as cv
+ from app.processors import face_detector
+
+ detector = face_detector.DetectorDLIBCNN()
+
+ # get file list
+ fp_videos = glob(join(opt_fp_in, '*.mp4'))
+ fp_videos += glob(join(opt_fp_in, '*.webm'))
+ fp_videos += glob(join(opt_fp_in, '*.mkv'))
+
+ frame_interval_count = 0
+ frame_count = 0
+
+ file_utils.mkdirs(opt_fp_out)
+
+ for fp_video in tqdm(fp_videos):
+
+ video = cv.VideoCapture(fp_video)
+
+ while video.isOpened():
+ res, frame = video.read()
+ if not res:
+ break
+
+ frame_count += 1 # for naming
+ frame_interval_count += 1 # for interval
+
+ bboxes = detector.detect(frame, opt_size=opt_size, opt_pyramids=0)
+ if len(bboxes) > 0 and frame_interval_count >= opt_frame_interval:
+ # save frame
+ fname = file_utils.zpad(frame_count)
+ fp_frame = join(opt_fp_out, '{}_{}.jpg'.format(Path(fp_video).stem, fname))
+ cv.imwrite(fp_frame, frame)
+ frame_interval_count = 0
+
diff --git a/megapixels/commands/datasets/50people.py b/megapixels/commands/datasets/50people.py
new file mode 100644
index 00000000..fb35b2fe
--- /dev/null
+++ b/megapixels/commands/datasets/50people.py
@@ -0,0 +1,129 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+
+import dlib
+import pandas as pd
+from PIL import Image, ImageOps, ImageFilter
+from app.utils import file_utils, im_utils
+
+
+log = logger_utils.Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Output directory')
+@click.option('--media', 'opt_dir_media',
+ help='Output directory')
+@click.option('--action', 'opt_action',
+  type=click.Choice(['download', 'face_frames']),
+  default='download',
+ help='Command action')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_action, opt_slice):
+ """YTMU utils"""
+
+
+ from tqdm import tqdm
+
+ # -------------------------------------------------
+ # process
+
+ if opt_action == 'download':
+ # downloads video files with ytdl
+ handle_download(opt_fp_in, opt_fp_out, opt_slice)
+ elif opt_action == 'face_frames':
+    handle_face_frames(opt_fp_in, opt_fp_out, opt_dir_media)
+
+
+
+
+
+def handle_face_frames(fp_in, dir_out, dir_videos):
+ if not dir_out or not dir_videos:
+ log.error('-o/--output and --videos required')
+ return
+
+ import cv2 as cv
+ from tqdm import tqdm
+ from app.processors import face_detector
+ detector = face_detector.DetectorDLIBCNN()
+
+ # get file list
+ fp_videos = glob(join(dir_videos, '*.mp4'))
+ fp_videos += glob(join(dir_videos, '*.webm'))
+ fp_videos += glob(join(dir_videos, '*.mkv'))
+
+ face_interval = 30
+ frame_interval_count = 0
+ frame_count = 0
+
+ file_utils.mkdirs(dir_out)
+
+ for fp_video in tqdm(fp_videos):
+ # log.debug('opening: {}'.format(fp_video))
+ video = cv.VideoCapture(fp_video)
+ while video.isOpened():
+ res, frame = video.read()
+ if not res:
+ break
+
+ frame_count += 1 # for naming
+ frame_interval_count += 1 # for interval
+ bboxes = detector.detect(frame, opt_size=(320, 240), opt_pyramids=0)
+ if len(bboxes) > 0 and frame_interval_count >= face_interval:
+ # save frame
+ fp_frame = join(dir_out, '{}_{}.jpg'.format(Path(fp_video).stem, file_utils.zpad(frame_count)))
+ cv.imwrite(fp_frame, frame)
+ frame_interval_count = 0
+
+
+def handle_download(fp_in, dir_out, opt_slice):
+ import youtube_dl
+ df = pd.read_csv(fp_in)
+ if opt_slice:
+ df = df[opt_slice[0]:opt_slice[1]]
+ df = df.fillna('')
+ fp_videos = glob(join(dir_out, '*.mp4'))
+ fp_videos += glob(join(dir_out, '*.webm'))
+ fp_videos += glob(join(dir_out, '*.mkv'))
+
+ ydl = youtube_dl.YoutubeDL({'outtmpl': join(dir_out, '') + '%(id)s.%(ext)s'})
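+  # naming downloads '%(id)s.%(ext)s' lets the loop below skip ids already on disk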
+
+ for i, row in df.iterrows():
+ vid = str(row['youtube_id'])
+ if not vid:
+ vid = row['vimeo_id']
+ if vid:
+ vid = str(int(vid))
+ url = 'https://vimeo.com/{}'.format(vid)
+ else:
+ url = 'https://youtube.com/watch?v={}'.format(vid)
+ if not vid:
+ log.warn('no video id: {} for {}'.format(vid, row['city']))
+ continue
+
+ found = False
+ for fp_video in fp_videos:
+ if vid in fp_video:
+ #log.debug('skip: {}'.format(vid))
+ found = True
+
+ if not found:
+ try:
+ with ydl:
+ ydl.download([url])
+      except Exception:
+ log.error('could not dl: {}'.format(vid))
diff --git a/megapixels/commands/datasets/feret.py b/megapixels/commands/datasets/feret.py
new file mode 100644
index 00000000..906b4e37
--- /dev/null
+++ b/megapixels/commands/datasets/feret.py
@@ -0,0 +1,139 @@
+import bz2
+import io
+
+import click
+from PIL import Image
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+pose_choices = {
+  'fa': 0, 'fb': 0, 'hl': 67.5, 'hr': -67.5, 'pl': 90, 'pr': -90,
+  'ql': 22.5, 'qr': -22.5, 'ra': 45, 'rb': 15, 'rc': -15, 'rd': -45, 're': -75}
+
+poses_left = ['hl', 'ql', 'pl', 'ra', 'rb']
+poses_right = ['hr', 'qr', 'pr', 'rc', 'rd', 're']
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out', required=True,
+ help='Output directory')
+@click.option('-a', '--angle', 'opt_angle', type=(float, float), default=(0,0),
+ help='Min/max face angles')
+@click.option('-t', '--threads', 'opt_threads', default=8,
+ help='Number of threads')
+@click.option('--flip', 'opt_flip', type=click.Choice(['r', 'l']),
+ help='Flip profile images to the R or L')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_angle, opt_threads, opt_flip):
+ """Extracts FERET images"""
+
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+ from tqdm import tqdm
+ from multiprocessing.dummy import Pool as ThreadPool
+ from functools import partial
+
+ from PIL import ImageOps
+ from app.utils import file_utils
+
+ # filter angles
+ poses = [k for k, v in pose_choices.items() if \
+ abs(v) >= opt_angle[0] and abs(v) <= opt_angle[1]]
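+  # e.g. --angle 0 22.5 keeps fa, fb (0), rb, rc (+/-15) and ql, qr (+/-22.5),
+  # and drops all half, profile, and wider random poses (worked example)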
+
+ # glob images dir for all *ppm.bz2
+ fp_ims = []
+ for pose in poses:
+ log.info('globbing pose: {}'.format(pose))
+    fp_ims += glob(join(opt_fp_in, '**', '*_{}.ppm.bz2'.format(pose)), recursive=True)
+ log.info('Processing: {:,} files'.format(len(fp_ims)))
+
+ # convert bz2 to png
+ def pool_func(fp_im, opt_fp_out, opt_flip):
+ try:
+ pbar.update(1)
+ im_pil = bz2_to_pil(fp_im)
+ fpp_im = Path(fp_im)
+ fp_out = join(opt_fp_out, '{}.png'.format(fpp_im.stem))
+ fp_out = fp_out.replace('.ppm','') # remove ppm
+ if opt_flip:
+ pose_code = fpp_im.stem.split('_')[-1][:2]
+ # log.debug('opt_flip: {}, found: {}'.format(opt_flip, pose_code))
+ if opt_flip == 'r' and pose_code in poses_right \
+ or opt_flip == 'l' and pose_code in poses_left:
+ im_pil = ImageOps.mirror(im_pil)
+ im_pil.save(fp_out)
+ return True
+ except Exception as e:
+ log.error('Error processing: {}, error: {}'.format(fp_im, e))
+ return False
+
+ # make output directory
+ file_utils.mkdirs(opt_fp_out)
+
+  # setup multithreading; the with-block owns the single progress bar
+  pool_resize = partial(pool_func, opt_fp_out=opt_fp_out, opt_flip=opt_flip)
+  pool = ThreadPool(opt_threads)
+  with tqdm(total=len(fp_ims)) as pbar:
+    results = pool.map(pool_resize, fp_ims)
+
+ # results
+ log.info('Converted: {} / {} images'.format(results.count(True), len(fp_ims)))
+
+
+# ------------------------------------------------------------------
+# local utils
+
+def bz2_to_pil(fp_src):
+ with open(fp_src, 'rb') as fp:
+ im_raw = bz2.decompress(fp.read())
+ im_pil = Image.open(io.BytesIO(im_raw))
+ return im_pil
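+# minimal usage sketch (hypothetical path):
+#   im = bz2_to_pil('/data/feret/00001/00001_930831_fa.ppm.bz2')
+#   im.save('00001_930831_fa.png')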
+
+
+
+"""
+
+A breakdown of the images by pose is:
+ Pose Angle Images Subjects
+ fa 0 1364 994
+ fb 0 1358 993
+ hl +67.5 1267 917
+ hr -67.5 1320 953
+ pl +90 1312 960
+ pr -90 1363 994
+ ql +22.5 761 501
+ qr -22.5 761 501
+ ra +45 321 261
+ rb +15 321 261
+ rc -15 610 423
+ rd -45 290 236
+ re -75 290 236
+
+ There are 13 different poses. (The orientation "right" means
+facing the photographer's right.)
+ fa regular frontal image
+ fb alternative frontal image, taken shortly after the
+ corresponding fa image
+ pl profile left
+ hl half left - head turned about 67.5 degrees left
+ ql quarter left - head turned about 22.5 degrees left
+ pr profile right
+ hr half right - head turned about 67.5 degrees right
+ qr quarter right - head turned about 22.5 degrees right
+ ra random image - head turned about 45 degree left
+ rb random image - head turned about 15 degree left
+ rc random image - head turned about 15 degree right
+ rd random image - head turned about 45 degree right
+ re random image - head turned about 75 degree right
+
+""" \ No newline at end of file
diff --git a/megapixels/commands/datasets/filter_by_pose.py b/megapixels/commands/datasets/filter_by_pose.py
new file mode 100644
index 00000000..a588b18e
--- /dev/null
+++ b/megapixels/commands/datasets/filter_by_pose.py
@@ -0,0 +1,96 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--yaw', 'opt_yaw', type=(float, float), default=cfg.POSE_MINMAX_YAW,
+ help='Yaw (min, max)')
+@click.option('--roll', 'opt_roll', type=(float, float), default=cfg.POSE_MINMAX_ROLL,
+ help='Roll (min, max)')
+@click.option('--pitch', 'opt_pitch', type=(float, float), default=cfg.POSE_MINMAX_PITCH,
+ help='Pitch (min, max)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_yaw, opt_roll, opt_pitch):
+ """Filter out exaggerated poses"""
+
+ import sys
+ from os.path import join
+ from pathlib import Path
+ import shutil
+ from datetime import datetime
+
+ import pandas as pd
+ from tqdm import tqdm
+
+ from app.models.data_store import DataStore
+ from app.utils import file_utils
+
+ # create date store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # load pose
+ fp_pose = data_store.metadata(types.Metadata.FACE_POSE)
+ df_pose = pd.read_csv(fp_pose).set_index('index')
+ # load roi
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # load filepath
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+ # debug
+ log.info('Processing {:,} rows'.format(len(df_pose)))
+ n_rows = len(df_record)
+
+ # filter out extreme poses
+ invalid_indices = []
+ for ds_pose in tqdm(df_pose.itertuples(), total=len(df_pose)):
+    if (ds_pose.yaw < opt_yaw[0] or ds_pose.yaw > opt_yaw[1]) \
+        or (ds_pose.roll < opt_roll[0] or ds_pose.roll > opt_roll[1]) \
+        or (ds_pose.pitch < opt_pitch[0] or ds_pose.pitch > opt_pitch[1]):
+      invalid_indices.append(ds_pose.Index) # unique file indices
+
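+  # e.g. with --yaw -30 30 a face measured at yaw=45 falls outside the
+  # range and its row index is queued for removal (illustrative values)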
+ # filter out valid/invalid
+ log.info(f'indices 0-20: {invalid_indices[:20]}')
+ log.info(f'Removing {len(invalid_indices)} invalid indices...')
+ df_record = df_record.drop(df_record.index[invalid_indices])
+ df_roi = df_roi.drop(df_roi.index[invalid_indices])
+ df_pose = df_pose.drop(df_pose.index[invalid_indices])
+ log.info(f'Removed {n_rows - len(df_record)}')
+
+ # move file to make backup
+  dir_bkup = join(Path(fp_pose).parent, f'backup_{datetime.now():%Y%m%d_%H%M%S}')
+ file_utils.mkdirs(dir_bkup)
+ # move files to backup
+ shutil.move(fp_record, join(dir_bkup, Path(fp_record).name))
+ shutil.move(fp_roi, join(dir_bkup, Path(fp_roi).name))
+ shutil.move(fp_pose, join(dir_bkup, Path(fp_pose).name))
+ # resave file records
+ df_record = df_record.reset_index(drop=True)
+ df_record.index.name = 'index'
+ df_record.to_csv(fp_record)
+ # resave ROI
+ df_roi = df_roi.reset_index(drop=True)
+ df_roi.index.name = 'index'
+ df_roi.to_csv(fp_roi)
+ # resave pose
+ df_pose = df_pose.reset_index(drop=True)
+ df_pose.index.name = 'index'
+ df_pose.to_csv(fp_pose)
diff --git a/megapixels/commands/datasets/gen_filepath.py b/megapixels/commands/datasets/gen_filepath.py
new file mode 100644
index 00000000..5db405c0
--- /dev/null
+++ b/megapixels/commands/datasets/gen_filepath.py
@@ -0,0 +1,102 @@
+"""
+Begin with this file to process folder of images
+- Converts folders and subdirectories into CSV with file attributes split
+"""
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in',
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+ help='Use glob recursion (slower)')
+@click.option('-t', '--threads', 'opt_threads', default=4,
+ help='Number of threads')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_slice,
+ opt_recursive, opt_threads, opt_force):
+ """Multithreading test"""
+
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+ from multiprocessing.dummy import Pool as ThreadPool
+ import random
+
+ import pandas as pd
+ from tqdm import tqdm
+ from glob import glob
+
+ from app.models.data_store import DataStore
+ from app.utils import file_utils, im_utils
+
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_out = opt_fp_out if opt_fp_out is not None else data_store.metadata(types.Metadata.FILEPATH)
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+
+ # glob files
+ fp_in = opt_fp_in if opt_fp_in is not None else data_store.media_images_original()
+ fp_ims = []
+ log.info(f'Globbing {fp_in}')
+ for ext in ['jpg', 'png']:
+ if opt_recursive:
+ fp_glob = join(fp_in, '**/*.{}'.format(ext))
+ fp_ims += glob(fp_glob, recursive=True)
+ else:
+ fp_glob = join(fp_in, '*.{}'.format(ext))
+ fp_ims += glob(fp_glob)
+
+ if not fp_ims:
+ log.warn('No images. Try with "--recursive"')
+ return
+
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+
+ log.info('Found {:,} images'.format(len(fp_ims)))
+
+
+ # convert data to dict
+ data = []
+ for i, fp_im in enumerate(tqdm(fp_ims)):
+ fpp_im = Path(fp_im)
+ subdir = str(fpp_im.parent.relative_to(fp_in))
+ data.append( {
+ 'subdir': subdir,
+ 'fn': fpp_im.stem,
+ 'ext': fpp_im.suffix.replace('.','')
+ })
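+  # e.g. an image at <fp_in>/n000001/0001_01.jpg is recorded as
+  # {'subdir': 'n000001', 'fn': '0001_01', 'ext': 'jpg'} (illustrative path)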
+
+ # save to CSV
+ file_utils.mkdirs(fp_out)
+ df_filepath = pd.DataFrame.from_dict(data)
+ df_filepath = df_filepath.sort_values(by=['subdir'], ascending=True)
+  df_filepath = df_filepath.reset_index(drop=True)
+ df_filepath.index.name = 'index'
+ df_filepath.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/datasets/gen_uuid.py b/megapixels/commands/datasets/gen_uuid.py
new file mode 100644
index 00000000..d7e7b52c
--- /dev/null
+++ b/megapixels/commands/datasets/gen_uuid.py
@@ -0,0 +1,65 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_force):
+ """Appends UUID to records CSV"""
+
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import base64
+ import uuid
+
+ from tqdm import tqdm
+ import pandas as pd
+
+ from app.models.data_store import DataStore
+
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.UUID) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # load sha256 records
+ fp_in = data_store.metadata(types.Metadata.SHA256) if opt_fp_in is None else opt_fp_in
+ log.info(f'Loading: {fp_in}')
+ df_records = pd.read_csv(fp_in).set_index('index')
+
+  df_uuids = df_records.copy()
+  # assign a fresh v4 UUID to every row
+  df_uuids['uuid'] = [uuid.uuid4() for _ in range(len(df_uuids))]
+
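+  # each row now carries a unique v4 UUID, e.g. (hypothetical value)
+  # index 0 -> 'f47ac10b-58cc-4372-a567-0e02b2c3d479'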
+ df_uuids = df_uuids.drop(['sha256', 'identity_index'], axis=1)
+ df_uuids.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/datasets/identity_meta_lfw.py b/megapixels/commands/datasets/identity_meta_lfw.py
new file mode 100644
index 00000000..45386b23
--- /dev/null
+++ b/megapixels/commands/datasets/identity_meta_lfw.py
@@ -0,0 +1,93 @@
+'''
+add identity from description using subdir
+'''
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Identity meta file')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--column', 'opt_identity_key', default='identity_key',
+ help='Match column')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_identity_key, opt_data_store, opt_force):
+ """Display image info"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ log = Logger.getLogger()
+
+ # output file
+ opt_dataset = types.Dataset.LFW
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_out = data_store.metadata(types.Metadata.IDENTITY) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ log.debug(fp_out)
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # init dataset
+ # load file records
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+
+ # load identity meta
+  # this file is typically prepared in a Jupyter notebook and must
+  # contain an 'identity_key' column to match against the file records
+ df_identity_meta = pd.read_csv(opt_fp_in).set_index('index')
+ # create a new file called 'identity.csv'
+ identities = []
+ # iterate records and get identity index where 'identity_key' matches
+ log.debug(type(df_record))
+ identity_indices = []
+ for record_idx, ds_record in tqdm(df_record.iterrows(), total=len(df_record)):
+ identity_value = ds_record[opt_identity_key]
+ identity_index = ds_record.identity_index
+ ds_identity_meta = df_identity_meta.loc[(df_identity_meta[opt_identity_key] == identity_value)]
+ if identity_index not in identity_indices:
+ identity_indices.append(identity_index)
+ identities.append({
+ 'description': ds_identity_meta.description.values[0],
+ 'name': ds_identity_meta.name.values[0],
+ 'images': ds_identity_meta.images.values[0],
+ 'gender': ds_identity_meta.gender.values[0],
+ })
+
+ # write to csv
+ df_identity = pd.DataFrame.from_dict(identities)
+ df_identity.index.name = 'index'
+ df_identity.to_csv(fp_out)
+ '''
+ index,name,name_orig,description,gender,images,image_index,identity_key
+ 0,A. J. Cook,AJ Cook,Canadian actress,f,1,0,AJ_Cook
+ '''
+
+
diff --git a/megapixels/commands/datasets/identity_meta_vgg_face2.py b/megapixels/commands/datasets/identity_meta_vgg_face2.py
new file mode 100644
index 00000000..85b6644d
--- /dev/null
+++ b/megapixels/commands/datasets/identity_meta_vgg_face2.py
@@ -0,0 +1,88 @@
+'''
+add identity from description using subdir
+'''
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Identity meta file')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_force):
+ """Display image info"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ log = Logger.getLogger()
+
+ # output file
+ opt_dataset = types.Dataset.VGG_FACE2
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_out = data_store.metadata(types.Metadata.IDENTITY) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ log.debug(fp_out)
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # init dataset
+ # load file records
+ identity_key = 'identity_key'
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+
+ # load identity meta
+  # this file is typically prepared in a Jupyter notebook and must
+  # contain an 'identity_key' column to match against the file records
+ df_identity_meta = pd.read_csv(opt_fp_in).set_index('index')
+ # create a new file called 'identity.csv'
+ identities = []
+ # iterate records and get identity index where 'identity_key' matches
+ log.debug(type(df_record))
+ identity_indices = []
+ for ds_record in tqdm(df_record.itertuples(), total=len(df_record)):
+ identity_value = ds_record.identity_key
+ identity_index = ds_record.identity_index
+ ds_identity_meta = df_identity_meta.loc[(df_identity_meta[identity_key] == identity_value)]
+ if identity_index not in identity_indices:
+ identity_indices.append(identity_index)
+ identities.append({
+ 'description': ds_identity_meta.description.values[0],
+ 'name': ds_identity_meta.name.values[0],
+ 'images': ds_identity_meta.images.values[0],
+ 'gender': ds_identity_meta.gender.values[0],
+ })
+
+ # write to csv
+ df_identity = pd.DataFrame.from_dict(identities)
+ df_identity.index.name = 'index'
+ df_identity.to_csv(fp_out)
+
+
diff --git a/megapixels/commands/datasets/lookup.py b/megapixels/commands/datasets/lookup.py
new file mode 100644
index 00000000..5ae4c3f5
--- /dev/null
+++ b/megapixels/commands/datasets/lookup.py
@@ -0,0 +1,63 @@
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('--index', 'opt_index', type=int, required=True,
+ help='File index to lookup')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.pass_context
+def cli(ctx, opt_index, opt_data_store, opt_dataset):
+ """Display image info"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ log = Logger.getLogger()
+ # init dataset
+ dataset = Dataset(opt_data_store, opt_dataset)
+ #dataset.load_face_vectors()
+ dataset.load_records()
+ dataset.load_identities()
+ # set data store and load files
+ # get image record from file index
+ image_record = dataset.index_to_record(opt_index)
+ image_record.summarize()
+ # load image
+ im = cv.imread(image_record.filepath)
+ # display
+ cv.imshow('', im)
+ # cv gui
+ while True:
+ k = cv.waitKey(1) & 0xFF
+ if k == 27 or k == ord('q'): # ESC
+ cv.destroyAllWindows()
+ sys.exit()
+ elif k != 255:
+ # any key to continue
+ break \ No newline at end of file
diff --git a/megapixels/commands/datasets/megaface_flickr_api.py b/megapixels/commands/datasets/megaface_flickr_api.py
new file mode 100644
index 00000000..62232ab8
--- /dev/null
+++ b/megapixels/commands/datasets/megaface_flickr_api.py
@@ -0,0 +1,141 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+
+import dlib
+import pandas as pd
+from PIL import Image, ImageOps, ImageFilter
+from app.utils import file_utils, im_utils
+
+
+log = logger_utils.Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+  help='Input CSV file')
+@click.option('-o', '--output', 'opt_fp_out',
+  help='Output CSV file')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-d', '--delay', 'opt_delay', default=None, type=int,
+ help='Delay between API calls to prevent rate-limiting')
+@click.option('--checkpoints', 'opt_checkpoints', is_flag=True,
+ help='Save checkpoints')
+@click.option('--api_key', 'opt_api_key', envvar='FLICKR_API_KEY')
+@click.option('--api_secret', 'opt_api_secret', envvar='FLICKR_API_SECRET')
+@click.option('--checkpoint_interval', 'opt_ckpt_interval', default=10000,
+ help='Save checkpoint interval')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_api_key, opt_api_secret,
+ opt_delay, opt_checkpoints, opt_ckpt_interval):
+ """Appends Flickr API info to CSV"""
+
+ from tqdm import tqdm
+ from glob import glob
+ import time
+ import flickr_api # pip install flickr_api
+ from flickr_api.flickrerrors import FlickrAPIError
+
+ # -------------------------------------------------
+ # process
+
+ if not opt_api_key or not opt_api_secret:
+ log.error('source .env vars for Flickr API and try again')
+ return
+
+ # init Flickr API
+ flickr_api.set_keys(api_key=opt_api_key, api_secret=opt_api_secret)
+
+  # read in CSV
+ df_ids = pd.read_csv(opt_fp_in)
+ if opt_slice:
+ df_ids = df_ids[opt_slice[0]:opt_slice[1]]
+
+ log.info('Processing: {:,} items'.format(len(df_ids)))
+
+ # iterate MegaFace IDs
+ identities = []
+
+ tqdm.pandas()
+
+ for idx, df_id in tqdm(df_ids.iterrows(), total=len(df_ids)):
+ # a = flickr_api.Person(id='123456789@N01')
+ df_id_dict = dict(df_id)
+
+ # append relevant data
+ try:
+ person = flickr_api.Person(id=df_id['nsid'])
+ info = person.getInfo()
+ df_id_dict.update( {
+ 'user_name': info.get('username', ''),
+ 'location': info.get('location', ''),
+ 'real_name': info.get('realname', ''),
+ 'time_zone': info.get('timezone', {}).get('timezone_id', ''),
+ 'time_first_photo': info.get('photos_info', {}).get('firstdatetaken'),
+ 'photos_count': info.get('photos_info', {}).get('count'),
+ 'description': info.get('description', ''),
+ 'id': info.get('id'),
+ 'path_alias': info.get('path_alias', ''),
+ 'is_pro': info.get('ispro', ''),
+ 'url_photos': info.get('photosurl', ''),
+        'url_profile': info.get('profileurl', ''),
+ 'url_mobile': info.get('mobileurl', ''),
+ })
+ identities.append(df_id_dict)
+
+ except FlickrAPIError as e:
+ log.error(e)
+
+
+ if opt_checkpoints:
+ if (idx + 1) % opt_ckpt_interval == 0:
+ df = pd.DataFrame.from_dict(identities)
+ fpp_out = Path(opt_fp_out)
+ opt_fp_out_ckpt = join(fpp_out.parent, '{}_ckpt_{}.csv'.format(fpp_out.stem, file_utils.zpad(idx + 1)))
+ log.info('Saving checkpoint {:,} to {}'.format(idx + 1, opt_fp_out_ckpt))
+ df.to_csv(opt_fp_out_ckpt, index=False)
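+        # e.g. -o ids.csv writes ids_ckpt_<zero-padded idx>.csv every
+        # opt_ckpt_interval rows (padding width comes from file_utils.zpad)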
+
+ if opt_delay:
+ time.sleep(opt_delay)
+
+
+ df = pd.DataFrame.from_dict(identities)
+ df.to_csv(opt_fp_out, index=False)
+
+ log.info('Wrote: {:,} lines to {}'.format(len(df), opt_fp_out))
+
+
+"""
+Example API data:
+{'id': '7124086@N07',
+ 'nsid': '7124086@N07',
+ 'ispro': 1,
+ 'can_buy_pro': 0,
+ 'iconserver': '2325',
+ 'iconfarm': 3,
+ 'path_alias': 'shirleylin',
+ 'has_stats': '1',
+ 'pro_badge': 'standard',
+ 'expire': '0',
+ 'username': 'ShirleyLin',
+ 'realname': 'Shirley Lin',
+ 'location': 'Fremont, California, US',
+ 'timezone': {'label': 'Pacific Time (US & Canada); Tijuana',
+ 'offset': '-08:00',
+ 'timezone_id': 'PST8PDT'},
+ 'description': '',
+ 'photosurl': 'https://www.flickr.com/photos/shirleylin/',
+ 'profileurl': 'https://www.flickr.com/people/shirleylin/',
+ 'mobileurl': 'https://m.flickr.com/photostream.gne?id=7102756',
+ 'photos_info': {'firstdatetaken': '2004-05-24 12:12:15',
+ 'firstdate': '1172556588',
+ 'count': 9665}}
+""" \ No newline at end of file
diff --git a/megapixels/commands/datasets/megaface_names.py b/megapixels/commands/datasets/megaface_names.py
new file mode 100644
index 00000000..01e93e2d
--- /dev/null
+++ b/megapixels/commands/datasets/megaface_names.py
@@ -0,0 +1,65 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+
+import dlib
+import pandas as pd
+from PIL import Image, ImageOps, ImageFilter
+from app.utils import file_utils, im_utils
+
+
+log = logger_utils.Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+  help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out',
+  help='Output CSV file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out):
+ """Creates CSV of NSIDs from MegaFace"""
+
+ from tqdm import tqdm
+ from glob import glob
+
+ # -------------------------------------------------
+ # process
+ fp_im_dirs = glob(join(opt_fp_in, '**/'), recursive=True)
+
+ log.info('Found {} directories'.format(len(fp_im_dirs)))
+
+ identities = {}
+
+ for fp_im_dir in tqdm(fp_im_dirs):
+ # 1234567@N05_identity_1
+ try:
+ dir_id_name = Path(fp_im_dir).name
+ nsid = dir_id_name.split('_')[0]
+ identity_num = dir_id_name.split('_')[2]
+ id_key = '{}_{}'.format(nsid, identity_num)
+ num_images = len(glob(join(fp_im_dir, '*.jpg')))
+      if id_key not in identities:
+ identities[id_key] = {'nsid': nsid, 'identity': identity_num, 'images': num_images}
+ else:
+ identities[id_key]['images'] += num_images
+    except Exception:
+      # skip directories that don't match the expected naming pattern
+      continue
+
+  # convert the identities dict to a DataFrame
+  identities_list = [v for k, v in identities.items()]
+  df = pd.DataFrame.from_dict(identities_list)
+
+ file_utils.mkdirs(opt_fp_out)
+
+  df.to_csv(opt_fp_out, index=False)
+  log.info('Wrote {} lines to {}'.format(len(df), opt_fp_out))
+
+
diff --git a/megapixels/commands/datasets/records.py b/megapixels/commands/datasets/records.py
new file mode 100644
index 00000000..b6ef618b
--- /dev/null
+++ b/megapixels/commands/datasets/records.py
@@ -0,0 +1,167 @@
+'''
+Generates file records CSV (sha256, uuid, identity index) for a dataset
+'''
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+identity_sources = ['subdir', 'subdir_head', 'subdir_tail']
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-t', '--threads', 'opt_threads', default=12,
+ help='Number of threads')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('--identity', 'opt_identity', default=None, type=click.Choice(identity_sources),
+ help='Identity source, blank for no identity')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+ help='Use glob recursion (slower)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media, opt_slice, opt_threads,
+ opt_identity, opt_force, opt_recursive):
+ """Generates sha256, uuid, and identity index CSV file"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+ from multiprocessing.dummy import Pool as ThreadPool
+ import random
+ import uuid
+
+ import pandas as pd
+ from tqdm import tqdm
+ from glob import glob
+
+ from app.models.data_store import DataStore
+ from app.utils import file_utils, im_utils
+
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # ----------------------------------------------------------------
+ # glob files
+
+ fp_in = opt_fp_in if opt_fp_in is not None else data_store.media_images_original()
+ log.info(f'Globbing {fp_in}')
+ fp_ims = file_utils.glob_multi(fp_in, ['jpg', 'png'], recursive=opt_recursive)
+ # fail if none
+ if not fp_ims:
+ log.error('No images. Try with "--recursive"')
+ return
+ # slice to reduce
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ log.info('Found {:,} images'.format(len(fp_ims)))
+
+
+ # ----------------------------------------------------------------
+ # multithread process into SHA256
+
+ def as_sha256(fp_im):
+ pbar.update(1)
+ return file_utils.sha256(fp_im)
+
+ # convert to thread pool
+  pool = ThreadPool(opt_threads)
+  with tqdm(total=len(fp_ims)) as pbar:
+    sha256s = pool.map(as_sha256, fp_ims)
+
+
+ # ----------------------------------------------------------------
+ # convert data to dict
+
+ data = []
+  identity_count = 0
+ for sha256, fp_im in zip(sha256s, fp_ims):
+ fpp_im = Path(fp_im)
+ subdir = str(fpp_im.parent.relative_to(fp_in))
+
+
+ if opt_identity:
+ subdirs = subdir.split('/')
+      if not subdirs:
+        log.error(f'Could not split subdir: "{subdir}". Try a different option for "--identity"')
+        log.error('exiting')
+        return
+ if opt_identity == 'subdir':
+ identity = subdirs[0] # use first/only part
+ elif opt_identity == 'subdir_head':
+ identity = subdirs[0] # use first part of subdir path
+ elif opt_identity == 'subdir_tail':
+ identity = subdirs[-1] # use last part of subdir path
+    else:
+      identity = identity_count # use incrementing number
+      identity_count += 1
+
+ data.append({
+ 'subdir': subdir,
+ 'fn': fpp_im.stem,
+ 'ext': fpp_im.suffix.replace('.',''),
+ 'sha256': sha256,
+ 'uuid': uuid.uuid4(),
+ 'identity_key': identity
+ })
+
+ df_records = pd.DataFrame.from_dict(data)
+ if opt_identity:
+ log.info(f'adding identity index using: "{opt_identity}". This may take a while...')
+ # convert dict to DataFrame
+ # sort based on identity_key
+ df_records = df_records.sort_values(by=['identity_key'], ascending=True)
+ # add new column for identity
+ df_records['identity_index'] = [-1] * len(df_records)
+ # populate the identity_index
+ df_records_identity_groups = df_records.groupby('identity_key')
+ # enumerate groups to create identity indices
+ for identity_index, df_records_identity_group_tuple in enumerate(df_records_identity_groups):
+ identity_key, df_records_identity_group = df_records_identity_group_tuple
+ for ds_record in df_records_identity_group.itertuples():
+ df_records.at[ds_record.Index, 'identity_index'] = identity_index
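+    # e.g. all rows with identity_key 'n000001' receive identity_index 0,
+    # 'n000002' receives 1, and so on in sorted key order (illustrative keys)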
+ # reset index after being sorted
+ df_records = df_records.reset_index(drop=True)
+  else:
+    # no identity source; identity_key already holds an incrementing number
+    pass
+
+ df_records.index.name = 'index' # reassign 'index' as primary key column
+ # write to CSV
+ file_utils.mkdirs(fp_out)
+ df_records.to_csv(fp_out)
+ # done
+  log.info(f'Wrote {len(df_records)} rows to {fp_out}') \ No newline at end of file
diff --git a/megapixels/commands/datasets/s3_sync.py b/megapixels/commands/datasets/s3_sync.py
new file mode 100644
index 00000000..17940c6d
--- /dev/null
+++ b/megapixels/commands/datasets/s3_sync.py
@@ -0,0 +1,61 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+s3_dirs = {'media': cfg.S3_MEDIA_URL, 'metadata': cfg.S3_METADATA_URL}
+
+@click.command()
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-t', '--type', 'opt_type', type=click.Choice(s3_dirs.keys()), required=True,
+ help='S3 location')
+@click.option('--dry-run', 'opt_dryrun', is_flag=True, default=False)
+@click.pass_context
+def cli(ctx, opt_data_store, opt_dataset, opt_type, opt_dryrun):
+ """Syncs files with S3/spaces server"""
+
+ from os.path import join
+ from pathlib import Path
+
+ from tqdm import tqdm
+ import pandas as pd
+ import subprocess
+
+ from app.utils import logger_utils, file_utils
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ dataset_name = opt_dataset.name.lower()
+ if opt_type == 'media':
+ dir_src = join(data_store.uuid_dir(), '')
+ dir_dst = join(s3_dirs[opt_type], dataset_name, '')
+ elif opt_type == 'metadata':
+ dir_src = join(data_store.metadata_dir(), '')
+ dir_dst = join(s3_dirs[opt_type], dataset_name, '')
+
+ cmd = ['s3cmd', 'sync', dir_src, dir_dst, '-P', '--follow-symlinks']
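+  # e.g. (illustrative) for --type media --dataset vgg_face2 this runs:
+  #   s3cmd sync <media>/uuid/ s3://megapixels/v1/media/vgg_face2/ -P --follow-symlinks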
+ log.info(' '.join(cmd))
+ if not opt_dryrun:
+ subprocess.call(cmd)
+
+
+'''
+upload: '/data_store_ssd/datasets/people/vgg_face2/media/uuid/00418e0e-48e9-44f9-b6a0-b2ffd773802e.jpg' -> 's3://megapixels/v1/media/vgg_face2/00418e0e-48e9-44f9-b6a0-b2ffd773802e.jpg' [3202 of 3187313]
+[2953 of 3187313]
+''' \ No newline at end of file
diff --git a/megapixels/commands/datasets/symlink_uuid.py b/megapixels/commands/datasets/symlink_uuid.py
new file mode 100644
index 00000000..7c5faa95
--- /dev/null
+++ b/megapixels/commands/datasets/symlink_uuid.py
@@ -0,0 +1,57 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+  help='Override output directory for symlinks')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset):
+ """Symlinks images to new directory for S3"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+
+ from tqdm import tqdm
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_records = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_records = pd.read_csv(fp_records).set_index('index')
+ nrows = len(df_records)
+
+ dir_out = data_store.uuid_dir() if opt_fp_out is None else opt_fp_out
+ file_utils.mkdirs(dir_out)
+
+ for ds_record in tqdm(df_records.itertuples(), total=nrows):
+ # make image path
+ fp_src = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ fp_dst = data_store.face_uuid(ds_record.uuid, ds_record.ext)
+ Path(fp_dst).symlink_to(Path(fp_src))
+
+ log.info('symlinked {:,} files'.format(nrows)) \ No newline at end of file
diff --git a/megapixels/commands/datasets/vecs_to_id.py b/megapixels/commands/datasets/vecs_to_id.py
new file mode 100644
index 00000000..07c7389e
--- /dev/null
+++ b/megapixels/commands/datasets/vecs_to_id.py
@@ -0,0 +1,50 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+  help='Input face vectors CSV')
+@click.option('-r', '--records', 'opt_fp_records', required=True,
+  help='Input records CSV')
+@click.option('-o', '--output', 'opt_fp_out', required=True,
+ help='Output JSON')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_records, opt_fp_out, opt_force):
+ """Merges ID with face vectors"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+
+ from tqdm import tqdm
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ df_vecs = pd.read_csv(opt_fp_in)
+ df_records = pd.read_csv(opt_fp_records)
+ nrows = len(df_vecs)
+
+ # face vecs
+ id_vecs = {}
+
+ for roi_idx, row in tqdm(df_vecs.iterrows(), total=nrows):
+ record_id = int(row['id'])
+ vec = row['vec'].split(',')
+ id_vecs[record_id] = vec
+
+ # save as JSON
+ file_utils.write_json(id_vecs, opt_fp_out, verbose=True)
+
+ \ No newline at end of file
diff --git a/megapixels/commands/datasets/vecs_to_uuid.py b/megapixels/commands/datasets/vecs_to_uuid.py
new file mode 100644
index 00000000..7bb82083
--- /dev/null
+++ b/megapixels/commands/datasets/vecs_to_uuid.py
@@ -0,0 +1,56 @@
+"""
+Crop images to prepare for training
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+  help='Input face vectors CSV')
+@click.option('-r', '--records', 'opt_fp_records', required=True,
+  help='Input records CSV')
+@click.option('-o', '--output', 'opt_fp_out', required=True,
+ help='Output JSON')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_records, opt_fp_out, opt_force):
+ """Merges UUID with face vectors"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+
+ from tqdm import tqdm
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ df_vecs = pd.read_csv(opt_fp_in)
+ df_records = pd.read_csv(opt_fp_records)
+ nrows = len(df_vecs)
+
+ # face vecs
+ uuid_vecs = {}
+
+ for roi_idx, row in tqdm(df_vecs.iterrows(), total=nrows):
+ # make image path
+ record_id = int(row['id'])
+ uuid = df_records.iloc[record_id]['uuid']
+ vec = row['vec'].split(',')
+ uuid_vecs[uuid] = vec
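+  # e.g. a row with id=42 and vec '0.01,0.23,...' maps the uuid of
+  # records row 42 to ['0.01', '0.23', ...] (illustrative values)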
+
+ # save as JSON
+ file_utils.write_json(uuid_vecs, opt_fp_out)
+
+ \ No newline at end of file
diff --git a/megapixels/commands/datasets/ytmu.py b/megapixels/commands/datasets/ytmu.py
new file mode 100644
index 00000000..66680ed0
--- /dev/null
+++ b/megapixels/commands/datasets/ytmu.py
@@ -0,0 +1,205 @@
+from glob import glob
+import os
+from os.path import join
+from pathlib import Path
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+
+import dlib
+import pandas as pd
+from PIL import Image, ImageOps, ImageFilter
+from app.utils import file_utils, im_utils
+
+
+log = logger_utils.Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input directory')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Output directory')
+@click.option('--videos', 'opt_dir_videos',
+  help='Videos directory')
+@click.option('--action', 'opt_action',
+ type=click.Choice(['info', 'faces', 'rename', 'download', 'metadata', 'split_frames']),
+ default='info',
+ help='Command action')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_videos, opt_action):
+ """YTMU utils"""
+
+
+ from tqdm import tqdm
+
+ # -------------------------------------------------
+ # process
+
+ if opt_action == 'metadata':
+ # downloads video metadata with ytdl
+ handle_metadata(opt_fp_in, opt_fp_out)
+ elif opt_action == 'download':
+ # downloads video files with ytdl
+ handle_download(opt_fp_in, opt_fp_out)
+  elif opt_action == 'info':
+    # converts original data file to clean CSV
+    handle_info(opt_fp_in, opt_fp_out)
+ elif opt_action == 'rename':
+ # rename the videos to video ID
+ handle_rename(opt_fp_in, opt_fp_out, opt_dir_videos)
+  elif opt_action == 'split_frames':
+    # split videos into frames that contain faces
+    handle_split_frames(opt_fp_in, opt_fp_out, opt_dir_videos)
+
+
+
+
+# ----------------------------------------------------
+# handlers
+
+def handle_split_frames(fp_in, dir_out, dir_videos):
+ if not dir_out or not dir_videos:
+ log.error('-o/--output and --videos required')
+ return
+ import cv2 as cv
+ from tqdm import tqdm
+ from app.processors import face_detector
+ detector = face_detector.DetectorDLIBCNN()
+
+ # get file list
+ fp_videos = glob(join(dir_videos, '*.mp4'))
+ fp_videos += glob(join(dir_videos, '*.webm'))
+ fp_videos += glob(join(dir_videos, '*.mkv'))
+ face_interval = 30
+ frame_interval_count = 0
+ frame_count = 0
+
+ file_utils.mkdirs(dir_out)
+
+ for fp_video in tqdm(fp_videos):
+ # log.debug('opening: {}'.format(fp_video))
+ video = cv.VideoCapture(fp_video)
+ while video.isOpened():
+ res, frame = video.read()
+ if not res:
+ break
+
+ frame_count += 1 # for naming
+ frame_interval_count += 1 # for interval
+ bboxes = detector.detect(frame, opt_size=(320, 240), opt_pyramids=0)
+ if len(bboxes) > 0 and frame_interval_count >= face_interval:
+ # save frame
+ fp_frame = join(dir_out, '{}_{}.jpg'.format(Path(fp_video).stem, file_utils.zpad(frame_count)))
+ cv.imwrite(fp_frame, frame)
+ frame_interval_count = 0
+
+
+def handle_metadata(fp_in, fp_out):
+
+ keys = ['description', 'average_rating', 'dislike_count', 'categories',
+ 'thumbnail', 'title', 'upload_date', 'uploader_url', 'uploader_id',
+ 'fps', 'height', 'width', 'like_count', 'license', 'tags']
+
+ import youtube_dl
+
+  ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s.%(ext)s'})
+
+ df = pd.read_csv(fp_in)
+ data_exp = []
+
+ for i, row in df.iterrows():
+ video_data = {'url': row['url'], 'id': row['id']}
+ try:
+ with ydl:
+ url = 'http://www.youtube.com/watch?v={}'.format(row['id'])
+ result = ydl.extract_info(url, download=False)
+ video = result['entries'][0] if 'entries' in result else result
+ for k in keys:
+ val = video[k]
+ if k == 'title':
+ log.debug(val)
+ if type(val) == list:
+ val = '; '.join(val)
+ if type(val) == str:
+ video_data[k] = str(val).replace(',',';')
+ # log.debug('video_data: {}'.format(video_data))
+ except Exception as e:
+      log.warn('video unavailable: {}'.format(row['url']))
+ log.error(e)
+ continue
+ data_exp.append(video_data)
+
+ df_exp = pd.DataFrame.from_dict(data_exp)
+ df_exp.to_csv(fp_out)
+
+
+def handle_download(fp_in, dir_out):
+ import youtube_dl
+ df = pd.read_csv(fp_in)
+ fp_videos = glob(join(dir_out, '*.mp4'))
+ fp_videos += glob(join(dir_out, '*.webm'))
+ fp_videos += glob(join(dir_out, '*.mkv'))
+
+  ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s.%(ext)s'})
+
+ for i, row in df.iterrows():
+ vid = row['id']
+ found = False
+ for fp_video in fp_videos:
+ if vid in fp_video:
+ log.debug('skip: {}'.format(vid))
+ found = True
+ if not found:
+ try:
+ with ydl:
+ ydl.download(['http://www.youtube.com/watch?v={}'.format(vid)])
+      except Exception:
+        log.error('could not dl: {}'.format(vid))
+
+
+def handle_info(fp_in, fp_out):
+ if not fp_out:
+ log.error('--output required')
+ return
+ urls = file_utils.load_text(fp_in)
+ videos = []
+ for url in urls:
+ splits = url.split('v=')
+ try:
+ vid = splits[1]
+ vid = vid.split('&')[0]
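+      # e.g. 'https://www.youtube.com/watch?v=abc123&t=5s' -> 'abc123'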
+ videos.append({'url': url, 'id': vid})
+    except Exception:
+      log.warn('no video id for {}'.format(url))
+ # convert to df
+ df = pd.DataFrame.from_dict(videos)
+  df.to_csv(fp_out)
+
+
+def handle_rename(fp_in, fp_out, dir_videos):
+ import shutil
+
+ if not dir_videos:
+ log.error('--videos required')
+ return
+
+ fp_videos = glob(join(dir_videos, '*.mp4'))
+ fp_videos += glob(join(dir_videos, '*.webm'))
+ fp_videos += glob(join(dir_videos, '*.mkv'))
+
+ df = pd.read_csv(fp_in)
+
+ for i, row in df.iterrows():
+ vid = row['id']
+    fp_videos_copy = fp_videos.copy()
+    for fp_video in fp_videos_copy:
+ if vid in fp_video:
+ dst = join(dir_videos, '{}{}'.format(vid, Path(fp_video).suffix))
+ shutil.move(fp_video, dst)
+ log.debug('move {} to {}'.format(fp_video, dst))
+ fp_videos.remove(fp_video)
+ break \ No newline at end of file
diff --git a/megapixels/commands/demo/face_analysis.py b/megapixels/commands/demo/face_analysis.py
new file mode 100644
index 00000000..6721a02d
--- /dev/null
+++ b/megapixels/commands/demo/face_analysis.py
@@ -0,0 +1,56 @@
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+@click.command()
+@click.option('--index', 'opt_index', type=int, required=True,
+  help='ROI index to lookup')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.pass_context
+def cli(ctx, opt_index, opt_data_store, opt_dataset):
+ """Display image info"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils, path_utils
+
+ log = Logger.getLogger()
+
+ dataset = Dataset(opt_dataset).load(opt_data_store)
+ # find image records
+ image_record = dataset.roi_idx_to_record(opt_index)
+ # debug
+ image_record.summarize()
+ # load image
+ fp_im = image_record.filepath
+ im = cv.imread(fp_im)
+ # display
+ cv.imshow('', im)
+ # cv gui
+ while True:
+ k = cv.waitKey(1) & 0xFF
+ if k == 27 or k == ord('q'): # ESC
+ cv.destroyAllWindows()
+ sys.exit()
+ elif k != 255:
+ # any key to continue
+ break \ No newline at end of file
diff --git a/megapixels/commands/demo/face_search.py b/megapixels/commands/demo/face_search.py
new file mode 100644
index 00000000..6e4bcdad
--- /dev/null
+++ b/megapixels/commands/demo/face_search.py
@@ -0,0 +1,100 @@
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='File to lookup')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--results', 'opt_results', default=5,
+ help='Number of match results to display')
+@click.option('--gpu', 'opt_gpu', default=0,
+  help='GPU index (use -1 for CPU)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_results, opt_gpu):
+  """Searches a dataset for faces matching the query image"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+ import imutils
+
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
+ from app.processors import face_detector
+ from app.processors import face_recognition
+
+ log = Logger.getLogger()
+ # init dataset
+ dataset = Dataset(opt_data_store, opt_dataset)
+ dataset.load_face_vectors()
+ dataset.load_records()
+ dataset.load_identities()
+
+ # init face detection
+ detector = face_detector.DetectorDLIBHOG()
+
+ # init face recognition
+ recognition = face_recognition.RecognitionDLIB(gpu=opt_gpu)
+
+ # load query image
+ im_query = cv.imread(opt_fp_in)
+
+ # get detection as BBox object
+  bboxes = detector.detect(im_query, largest=True)
+  if not bboxes:
+    log.error('No face detected. Exiting')
+    return
+  bbox = bboxes[0]
+  dim = im_query.shape[:2][::-1] # (width, height)
+  bbox = bbox.to_dim(dim) # convert back to real dimensions
+
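+  # e.g. (assumed BBox semantics) a normalized box (0.25, 0.25, 0.75, 0.75)
+  # on a 640x480 query image maps to pixel coords (160, 120, 480, 360)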
+ # extract the face vectors
+ vec_query = recognition.vec(im_query, bbox)
+
+ # find matches
+ image_records = dataset.find_matches(vec_query, n_results=opt_results)
+
+ # summary
+ ims_match = [im_query]
+ for image_record in image_records:
+ image_record.summarize()
+ log.info(f'{image_record.filepath}')
+ im_match = cv.imread(image_record.filepath)
+ ims_match.append(im_match)
+
+ montages = imutils.build_montages(ims_match, (256, 256), (3,2))
+
+ for i, montage in enumerate(montages):
+ cv.imshow(f'{i}', montage)
+ # cv gui
+ while True:
+ k = cv.waitKey(1) & 0xFF
+ if k == 27 or k == ord('q'): # ESC
+ cv.destroyAllWindows()
+ sys.exit()
+ elif k != 255:
+ # any key to continue
+ break
diff --git a/megapixels/commands/faiss/build_db.py b/megapixels/commands/faiss/build_db.py
new file mode 100644
index 00000000..0f979e41
--- /dev/null
+++ b/megapixels/commands/faiss/build_db.py
@@ -0,0 +1,15 @@
+"""
+Load all the CSV files into MySQL
+"""
+
+import click
+
+from app.models.sql_factory import load_sql_datasets
+
+@click.command()
+@click.pass_context
+def cli(ctx):
+ """import the various CSVs into MySQL
+ """
+ print('Loading CSV datasets into SQL...')
+ load_sql_datasets(replace=True)
diff --git a/megapixels/commands/faiss/build_faiss.py b/megapixels/commands/faiss/build_faiss.py
new file mode 100644
index 00000000..fc6b37ce
--- /dev/null
+++ b/megapixels/commands/faiss/build_faiss.py
@@ -0,0 +1,24 @@
+"""
+Index all of the FAISS datasets
+"""
+
+import click
+
+from app.processors.faiss import build_all_faiss_databases
+
+@click.command()
+@click.pass_context
+def cli(ctx):
+ """build the FAISS index.
+ - looks for all datasets in faiss/metadata/
+ - uses the recipe above by default
+ - however you can override this by adding a new recipe in faiss/recipes/{name}.json
+ """
+ build_all_faiss_databases()
diff --git a/megapixels/commands/faiss/sync_metadata.py b/megapixels/commands/faiss/sync_metadata.py
new file mode 100644
index 00000000..b01211b4
--- /dev/null
+++ b/megapixels/commands/faiss/sync_metadata.py
@@ -0,0 +1,18 @@
+"""
+Sync the FAISS metadata
+"""
+
+import subprocess
+import click
+
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.pass_context
+def cli(ctx):
+ """synchronize metadata files from s3"""
+ sts = subprocess.call([
+ "s3cmd", "sync",
+ "s3://megapixels/v1/metadata/",
+ cfg.DIR_FAISS_METADATA + '/',
+ ])
diff --git a/megapixels/commands/misc/compare_sres.py b/megapixels/commands/misc/compare_sres.py
new file mode 100644
index 00000000..b96570fe
--- /dev/null
+++ b/megapixels/commands/misc/compare_sres.py
@@ -0,0 +1,59 @@
+import click
+
+
+@click.command()
+@click.option('-i', '--orig', 'opt_dir_in_orig', required=True,
+ help='Input directory')
+@click.option('-n', '--new', 'opt_dir_in_new', required=True,
+ help='Input directory files to compare to')
+@click.pass_context
+def cli(ctx, opt_dir_in_orig, opt_dir_in_new):
+ """Compare quality of super resolution images"""
+
+  import os
+  import sys
+  from os.path import join
+  from pathlib import Path
+  from glob import glob
+  from random import randint
+
+  from PIL import Image, ImageOps, ImageFilter
+  import cv2 as cv
+
+ from app.settings import types
+ from app.utils import click_utils
+ from app.settings import app_cfg as cfg
+ from app.utils import file_utils, im_utils, logger_utils
+
+ log = logger_utils.Logger.getLogger()
+
+ fp_ims = glob(join(opt_dir_in_new, '*.jpg'))
+ fp_ims += glob(join(opt_dir_in_new, '*.png'))
+
+ log.info('{}'.format(len(fp_ims)))
+
+ while True:
+ rn = randint(0, len(fp_ims) - 1)
+ fp_im_new = fp_ims[rn]
+ fp_im_orig = fp_im_new.replace(opt_dir_in_new, opt_dir_in_orig)
+ log.info('new: {}'.format(fp_im_new))
+ log.info('orig: {}'.format(fp_im_orig))
+
+ im_new = cv.imread(fp_im_new)
+ im_orig = cv.imread(fp_im_orig)
+
+ # show
+ cv.imshow('new', im_new)
+ cv.imshow('orig', im_orig)
+
+ # handle key io
+ k = cv.waitKey(0) & 0xFF
+ if k == 27 or k == ord('q'): # ESC
+ # exits the app
+ cv.destroyAllWindows()
+ sys.exit('Exiting because Q or ESC was pressed')
+ elif k == ord(' ') or k == 81 or k == 83:
+ continue
+
diff --git a/megapixels/commands/site/build.py b/megapixels/commands/site/build.py
new file mode 100644
index 00000000..2d344899
--- /dev/null
+++ b/megapixels/commands/site/build.py
@@ -0,0 +1,21 @@
+"""
+Build the static site
+"""
+
+import click
+
+from app.site.builder import build_site, build_file
+
+@click.command()
+@click.option('-i', '--input', 'input_file', required=False,
+ help='File to generate')
+@click.pass_context
+def cli(ctx, input_file):
+ """Build the static site
+ """
+ if input_file:
+ print('Building {}'.format(input_file))
+ build_file(input_file)
+ else:
+ print('Building the site...')
+ build_site()