From c3839ea797401d740db64691c0b4922c935b131c Mon Sep 17 00:00:00 2001 From: adamhrv Date: Sun, 16 Dec 2018 19:37:58 +0100 Subject: still sorting CSV vectors indexes --- megapixels/app/models/data_store.py | 55 ++++++++ megapixels/app/models/dataset.py | 41 +++--- megapixels/app/settings/app_cfg.py | 8 ++ megapixels/app/utils/path_utils.py | 52 -------- megapixels/cli_demo.py | 35 +++++ megapixels/commands/cv/files_to_rois.py | 156 ---------------------- megapixels/commands/cv/gen_face_vec.py | 123 ++++++++++++++++++ megapixels/commands/cv/gen_pose.py | 141 ++++++++++++++++++++ megapixels/commands/cv/gen_rois.py | 172 +++++++++++++++++++++++++ megapixels/commands/cv/rois_to_pose.py | 127 ------------------ megapixels/commands/cv/rois_to_vecs.py | 109 ---------------- megapixels/commands/datasets/add_uuid.py | 44 ------- megapixels/commands/datasets/file_meta.py | 84 ------------ megapixels/commands/datasets/filter_by_pose.py | 101 +++++++++++++++ megapixels/commands/datasets/filter_poses.py | 76 ----------- megapixels/commands/datasets/gen_filepath.py | 102 +++++++++++++++ megapixels/commands/datasets/gen_sha256.py | 152 ++++++++++++++++++++++ megapixels/commands/datasets/gen_uuid.py | 65 ++++++++++ megapixels/commands/datasets/lookup.py | 26 ++-- megapixels/commands/datasets/sha256.py | 89 ------------- megapixels/commands/demo/face_analysis.py | 56 ++++++++ megapixels/commands/demo/face_search.py | 95 ++++++++++++++ 22 files changed, 1141 insertions(+), 768 deletions(-) create mode 100644 megapixels/app/models/data_store.py delete mode 100644 megapixels/app/utils/path_utils.py create mode 100644 megapixels/cli_demo.py delete mode 100644 megapixels/commands/cv/files_to_rois.py create mode 100644 megapixels/commands/cv/gen_face_vec.py create mode 100644 megapixels/commands/cv/gen_pose.py create mode 100644 megapixels/commands/cv/gen_rois.py delete mode 100644 megapixels/commands/cv/rois_to_pose.py delete mode 100644 megapixels/commands/cv/rois_to_vecs.py delete mode 100644 megapixels/commands/datasets/add_uuid.py delete mode 100644 megapixels/commands/datasets/file_meta.py create mode 100644 megapixels/commands/datasets/filter_by_pose.py delete mode 100644 megapixels/commands/datasets/filter_poses.py create mode 100644 megapixels/commands/datasets/gen_filepath.py create mode 100644 megapixels/commands/datasets/gen_sha256.py create mode 100644 megapixels/commands/datasets/gen_uuid.py delete mode 100644 megapixels/commands/datasets/sha256.py create mode 100644 megapixels/commands/demo/face_analysis.py create mode 100644 megapixels/commands/demo/face_search.py diff --git a/megapixels/app/models/data_store.py b/megapixels/app/models/data_store.py new file mode 100644 index 00000000..8ec1f8ba --- /dev/null +++ b/megapixels/app/models/data_store.py @@ -0,0 +1,55 @@ +import os +from os.path import join +import logging + +from app.settings import app_cfg as cfg +from app.settings import types + + +# ------------------------------------------------------------------------- +# Metadata and media files +# ------------------------------------------------------------------------- + +class DataStore: + # local data store + def __init__(self, opt_data_store, opt_dataset): + self.data_store = join(f'/data_store_{opt_data_store.name.lower()}') + self.dir_dataset = join(self.data_store, 'datasets', cfg.DIR_PEOPLE, opt_dataset.name.lower()) + self.dir_media = join(self.dir_dataset, 'media') + self.dir_metadata = join(self.dir_dataset, 'metadata') + + def metadata(self, enum_type): + return 
join(self.dir_metadata, f'{enum_type.name.lower()}.csv') + + def media_images_original(self): + return join(self.dir_media, 'original') + + def face_image(self, subdir, fn, ext): + return join(self.dir_media, 'original', subdir, f'{fn}.{ext}') + + def face_image_crop(self, subdir, fn, ext): + return join(self.dir_media, 'cropped', subdir, f'{fn}.{ext}') + + +class DataStoreS3: + # S3 server + def __init__(self, opt_dataset): + self._dir_media = join(cfg.S3_HTTP_MEDIA_URL, opt_dataset.name.lower()) + self._dir_metadata = join(cfg.S3_HTTP_METADATA_URL, opt_dataset.name.lower()) + + def metadata(self, opt_metadata_type, ext='csv'): + return join(self._dir_metadata, f'{opt_metadata_type.name.lower()}.{ext}') + + def face_image(self, opt_uuid, ext='jpg'): + #return join(self._dir_media, 'original', f'{opt_uuid}.{ext}') + return join(self._dir_media, f'{opt_uuid}.{ext}') + + def face_image_crop(self, opt_uuid, ext='jpg'): + # not currently using? + return join(self._dir_media, 'cropped', f'{opt_uuid}.{ext}') + + + +# ------------------------------------------------------------------------- +# Models +# ------------------------------------------------------------------------- \ No newline at end of file diff --git a/megapixels/app/models/dataset.py b/megapixels/app/models/dataset.py index 11d568a5..8fef8a7e 100644 --- a/megapixels/app/models/dataset.py +++ b/megapixels/app/models/dataset.py @@ -2,6 +2,7 @@ Dataset model: container for all CSVs about a dataset """ import os +import sys from os.path import join from pathlib import Path import logging @@ -12,7 +13,8 @@ import numpy as np from app.settings import app_cfg as cfg from app.settings import types from app.models.bbox import BBox -from app.utils import file_utils, im_utils, path_utils +from app.utils import file_utils, im_utils +from app.models.data_store import DataStore, DataStoreS3 from app.utils.logger_utils import Logger # ------------------------------------------------------------------------- @@ -21,17 +23,19 @@ from app.utils.logger_utils import Logger class Dataset: - def __init__(self, opt_dataset_type, opt_data_store=types.DataStore.NAS): + def __init__(self, opt_data_store, opt_dataset_type, load_files=True): self._dataset_type = opt_dataset_type # enum type self.log = Logger.getLogger() self._metadata = {} self._face_vectors = [] self._nullframe = pd.DataFrame() # empty placeholder - self.data_store = path_utils.DataStore(opt_data_store, self._dataset_type) - self.data_store_s3 = path_utils.DataStoreS3(self._dataset_type) + self.data_store = DataStore(opt_data_store, self._dataset_type) + self.data_store_s3 = DataStoreS3(self._dataset_type) + self.load_metadata() - def load(self, opt_data_store): + def load_metadata(self): '''Loads all CSV files into (dict) of DataFrames''' + self.log.info(f'creating dataset: {self._dataset_type}...') for metadata_type in types.Metadata: self.log.info(f'load metadata: {metadata_type}') fp_csv = self.data_store.metadata(metadata_type) @@ -40,11 +44,12 @@ class Dataset: self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index') if metadata_type == types.Metadata.FACE_VECTOR: # convert DataFrame to list of floats - self._face_vecs = self.df_to_vec_list(self._metadata[metadata_type]) + self._face_vectors = self.df_to_vec_list(self._metadata[metadata_type]) + self.log.info(f'build face vector dict: {len(self._face_vectors)}') self._metadata[metadata_type].drop('vec', axis=1, inplace=True) else: - self.log.error('File not found: {fp_csv}. 
Replaced with empty DataFrame')
-        self._metadata[metadata_type] = self._nullframe
+        self.log.error(f'File not found: {fp_csv}. Exiting.')
+        sys.exit()
     self.log.info('finished loading')
 
   def metadata(self, opt_metadata_type):
@@ -80,7 +85,7 @@ class Dataset:
       image_record = ImageRecord(image_index, sha256, uuid, bbox, fp_im, fp_url)
       # now get the identity index (if available)
       identity_index = ds_sha256.identity_index
-      if identity_index:
+      if identity_index > -1:
         # then use the identity index to get the identity meta
         df_identity = df_filepath = self._metadata[types.Metadata.IDENTITY]
         ds_identity = df_identity.iloc[identity_index]
@@ -95,18 +100,24 @@ class Dataset:
         identity = Identity(identity_index, name=name, desc=desc, gender=gender,
           n_images=n_images, url=url, age=age, nationality=nationality)
         image_record.identity = identity
+      else:
+        self.log.info(f'no identity index: {ds_sha256}')
 
     return image_record
 
-  def matches(self, query_vec, n_results=5, threshold=0.5):
+  def find_matches(self, query_vec, n_results=5, threshold=0.6):
     image_records = []  # list of image matches w/identity if available
     # find most similar feature vector indexes
-    match_idxs = self.similar(query_vec, n_results, threshold)
+    sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
+    match_idxs = np.argpartition(sim_scores, n_results)[:n_results]
+
     for match_idx in match_idxs:
       # get the corresponding face vector row
+      self.log.debug(f'find match index: {match_idx}')
       image_record = self.roi_idx_to_record(match_idx)
-      results.append(image_record)
+      image_records.append(image_record)
     return image_records
 
   # ----------------------------------------------------------------------
@@ -114,8 +125,7 @@ class Dataset:
 
   def df_to_vec_list(self, df):
     # convert the DataFrame CSV to float list of vecs
-    vecs = [list(map(float,x.vec.split(','))) for x in df.itertuples()]
-    return vecs
+    return [list(map(float,x.vec.split(','))) for x in df.itertuples()]
 
   def similar(self, query_vec, n_results):
     '''Finds most similar N indices of query face vector
    :param query_vec: (list) face vector
    :param n_results: (int) number of indices
    :returns (list) of (int) indices
    '''
     # uses np.linalg based on the ageitgey/face_recognition code
-    vecs_sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
-    top_idxs = np.argpartition(vecs_sim_scores, n_results)[:n_results]
+    sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
+    top_idxs = np.argpartition(sim_scores, n_results)[:n_results]
     return top_idxs
 
diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py
index 50eaf576..7f9ed187 100644
--- a/megapixels/app/settings/app_cfg.py
+++ b/megapixels/app/settings/app_cfg.py
@@ -75,6 +75,7 @@ DIR_COMMANDS_DATASETS = 'commands/datasets'
 DIR_COMMANDS_FAISS = 'commands/faiss'
 DIR_COMMANDS_MISC = 'commands/misc'
 DIR_COMMANDS_SITE = 'commands/site'
+DIR_COMMANDS_DEMO = 'commands/demo'
 
 # -----------------------------------------------------------------------------
 # Filesystem settings
@@ -89,6 +90,13 @@ HASH_BRANCH_SIZE = 3
 DLIB_FACEREC_JITTERS = 5  # number of face recognition jitters
 DLIB_FACEREC_PADDING = 0.25  # default dlib
 
+# strict pose ranges (kept for reference; the wider ranges below are active)
+#POSE_MINMAX_YAW = (-25,25)
+#POSE_MINMAX_ROLL = (-15,15)
+#POSE_MINMAX_PITCH = (-10,10)
+
+POSE_MINMAX_YAW = (-40,40)
+POSE_MINMAX_ROLL = (-35,35)
+POSE_MINMAX_PITCH = (-25,25)
 # -----------------------------------------------------------------------------
 # Logging options exposed for custom click Params
 # -----------------------------------------------------------------------------
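The matching inlined into find_matches above is a plain L2 nearest-neighbour search over the stored face vectors, following the ageitgey/face_recognition approach. A minimal standalone sketch of the same logic, assuming 128-D dlib-style vectors (the function and variable names here are illustrative, not part of the patch):

    import numpy as np

    def similar_vecs(query_vec, face_vectors, n_results=5):
        # L2 distance between the query and every stored vector
        sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(face_vectors), axis=1)
        # argpartition returns the n_results smallest distances, unordered
        top_idxs = np.argpartition(sim_scores, n_results)[:n_results]
        # order the partition so the best match comes first
        return top_idxs[np.argsort(sim_scores[top_idxs])]

    # usage: the query vector itself should rank first
    rng = np.random.default_rng(0)
    vecs = rng.normal(size=(100, 128))
    assert similar_vecs(vecs[3], vecs)[0] == 3
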
diff --git a/megapixels/app/utils/path_utils.py b/megapixels/app/utils/path_utils.py
deleted file mode 100644
index b0262ea0..00000000
--- a/megapixels/app/utils/path_utils.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import os
-from os.path import join
-import logging
-
-from app.settings import app_cfg as cfg
-from app.settings import types
-
-
-# -------------------------------------------------------------------------
-# Metadata and media files
-# -------------------------------------------------------------------------
-
-class DataStore:
-  # local data store
-  def __init__(self, opt_data_store, opt_dataset):
-    self.data_store = join(f'/data_store_{opt_data_store.name.lower()}')
-    self.dir_dataset = join(self.data_store, 'datasets', cfg.DIR_PEOPLE, opt_dataset.name.lower())
-    self.dir_media = join(self.dir_dataset, 'media')
-    self.dir_metadata = join(self.dir_dataset, 'metadata')
-
-  def metadata(self, enum_type):
-    return join(self.dir_metadata, f'{enum_type.name.lower()}.csv')
-
-  def face_image(self, subdir, fn, ext):
-    return join(self.dir_media, 'original', subdir, f'{fn}.{ext}')
-
-  def face_image_crop(self, subdir, fn, ext):
-    return join(self.dir_media, 'cropped', subdir, f'{fn}.{ext}')
-
-
-class DataStoreS3:
-  # S3 server
-  def __init__(self, opt_dataset):
-    self._dir_media = join(cfg.S3_HTTP_MEDIA_URL, opt_dataset.name.lower())
-    self._dir_metadata = join(cfg.S3_HTTP_METADATA_URL, opt_dataset.name.lower())
-
-  def metadata(self, opt_metadata_type, ext='csv'):
-    return join(self._dir_metadata, f'{opt_metadata_type.name.lower()}.{ext}')
-
-  def face_image(self, opt_uuid, ext='jpg'):
-    #return join(self._dir_media, 'original', f'{opt_uuid}.{ext}')
-    return join(self._dir_media, f'{opt_uuid}.{ext}')
-
-  def face_image_crop(self, opt_uuid, ext='jpg'):
-    # not currently using?
-    return join(self._dir_media, 'cropped', f'{opt_uuid}.{ext}')
-
-
-
-# -------------------------------------------------------------------------
-# Models
-# -------------------------------------------------------------------------
\ No newline at end of file
diff --git a/megapixels/cli_demo.py b/megapixels/cli_demo.py
new file mode 100644
index 00000000..703db856
--- /dev/null
+++ b/megapixels/cli_demo.py
@@ -0,0 +1,35 @@
+# --------------------------------------------------------
+# add/edit commands in commands/demo directory
+# --------------------------------------------------------
+
+import click
+
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+from app.models.click_factory import ClickSimple
+
+# click cli factory
+cc = ClickSimple.create(cfg.DIR_COMMANDS_DEMO)
+
+# --------------------------------------------------------
+# CLI
+# --------------------------------------------------------
+@click.group(cls=cc, chain=False)
+@click.option('-v', '--verbose', 'verbosity', count=True, default=4,
+  show_default=True,
+  help='Verbosity: -v DEBUG, -vv INFO, -vvv WARN, -vvvv ERROR, -vvvvv CRITICAL')
+@click.pass_context
+def cli(ctx, **kwargs):
+  """\033[1m\033[94mMegaPixels: Demo Scripts\033[0m
+  """
+  ctx.opts = {}
+  # init logger
+  logger_utils.Logger.create(verbosity=kwargs['verbosity'])
+
+
+# --------------------------------------------------------
+# Entrypoint
+# --------------------------------------------------------
+if __name__ == '__main__':
+  cli()
+
diff --git a/megapixels/commands/cv/files_to_rois.py b/megapixels/commands/cv/files_to_rois.py
deleted file mode 100644
index 1aaf991c..00000000
--- a/megapixels/commands/cv/files_to_rois.py
+++ /dev/null
@@ -1,156 +0,0 @@
-"""
-Crop images to prepare for 
training -""" - -import click -# from PIL import Image, ImageOps, ImageFilter, ImageDraw - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg - -color_filters = {'color': 1, 'gray': 2, 'all': 3} - -@click.command() -@click.option('-i', '--input', 'opt_fp_in', required=True, - help='Input CSV (eg image_files.csv)') -@click.option('-m', '--media', 'opt_dir_media', required=True, - help='Input media directory') -@click.option('-o', '--output', 'opt_fp_out', required=True, - help='Output CSV') -@click.option('--size', 'opt_size', - type=(int, int), default=(300, 300), - help='Output image size') -@click.option('-t', '--detector-type', 'opt_detector_type', - type=cfg.FaceDetectNetVar, - default=click_utils.get_default(types.FaceDetectNet.DLIB_CNN), - help=click_utils.show_help(types.FaceDetectNet)) -@click.option('-g', '--gpu', 'opt_gpu', default=0, - help='GPU index') -@click.option('--conf', 'opt_conf_thresh', default=0.85, type=click.FloatRange(0,1), - help='Confidence minimum threshold') -@click.option('-p', '--pyramids', 'opt_pyramids', default=0, type=click.IntRange(0,4), - help='Number pyramids to upscale for DLIB detectors') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice list of files') -@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False, - help='Display detections to debug') -@click.option('-f', '--force', 'opt_force', is_flag=True, - help='Force overwrite file') -@click.option('--color', 'opt_color_filter', - type=click.Choice(color_filters.keys()), default='all', - help='Filter to keep color or grayscale images (color = keep color') -@click.option('--largest', 'opt_largest', is_flag=True, - help='Only keep largest face') -@click.pass_context -def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_size, opt_detector_type, - opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_force, opt_color_filter, - opt_largest): - """Converts frames with faces to CSV of ROIs""" - - import sys - import os - from os.path import join - from pathlib import Path - from glob import glob - - from tqdm import tqdm - import numpy as np - import dlib # must keep a local reference for dlib - import cv2 as cv - import pandas as pd - - from app.utils import logger_utils, file_utils, im_utils - from app.processors import face_detector - - # ------------------------------------------------- - # init here - - log = logger_utils.Logger.getLogger() - - if not opt_force and Path(opt_fp_out).exists(): - log.error('File exists. 
Use "-f / --force" to overwite') - return - - if opt_detector_type == types.FaceDetectNet.CVDNN: - detector = face_detector.DetectorCVDNN() - elif opt_detector_type == types.FaceDetectNet.DLIB_CNN: - detector = face_detector.DetectorDLIBCNN(opt_gpu) - elif opt_detector_type == types.FaceDetectNet.DLIB_HOG: - detector = face_detector.DetectorDLIBHOG() - elif opt_detector_type == types.FaceDetectNet.MTCNN: - detector = face_detector.DetectorMTCNN() - elif opt_detector_type == types.FaceDetectNet.HAAR: - log.error('{} not yet implemented'.format(opt_detector_type.name)) - return - - - # ------------------------------------------------- - # process here - color_filter = color_filters[opt_color_filter] - - # get list of files to process - df_files = pd.read_csv(opt_fp_in).set_index('index') - - if opt_slice: - df_files = df_files[opt_slice[0]:opt_slice[1]] - log.debug('processing {:,} files'.format(len(df_files))) - - - data = [] - - for df_file in tqdm(df_files.itertuples(), total=len(df_files)): - fp_im = join(opt_dir_media, str(df_file.subdir), f'{df_file.fn}.{df_file.ext}') - im = cv.imread(fp_im) - - # filter out color or grayscale iamges - if color_filter != color_filters['all']: - try: - is_gray = im_utils.is_grayscale(im) - if is_gray and color_filter != color_filters['gray']: - log.debug('Skipping grayscale image: {}'.format(fp_im)) - continue - except Exception as e: - log.error('Could not check grayscale: {}'.format(fp_im)) - continue - - try: - bboxes = detector.detect(im, size=opt_size, pyramids=opt_pyramids, largest=opt_largest) - except Exception as e: - log.error('could not detect: {}'.format(fp_im)) - log.error('{}'.format(e)) - continue - - for bbox in bboxes: - roi = { - 'image_index': int(df_file.Index), - 'x': bbox.x, - 'y': bbox.y, - 'w': bbox.w, - 'h': bbox.h, - 'image_width': im.shape[1], - 'image_height': im.shape[0]} - data.append(roi) - - # debug display - if opt_display and len(bboxes): - bbox_dim = bbox.to_dim(im.shape[:2][::-1]) # w,h - im_md = im_utils.resize(im, width=min(1200, opt_size[0])) - for bbox in bboxes: - bbox_dim = bbox.to_dim(im_md.shape[:2][::-1]) - cv.rectangle(im_md, bbox_dim.pt_tl, bbox_dim.pt_br, (0,255,0), 3) - cv.imshow('', im_md) - while True: - k = cv.waitKey(1) & 0xFF - if k == 27 or k == ord('q'): # ESC - cv.destroyAllWindows() - sys.exit() - elif k != 255: - # any key to continue - break - - # save date - file_utils.mkdirs(opt_fp_out) - df = pd.DataFrame.from_dict(data) - df.index.name = 'index' - df.to_csv(opt_fp_out) \ No newline at end of file diff --git a/megapixels/commands/cv/gen_face_vec.py b/megapixels/commands/cv/gen_face_vec.py new file mode 100644 index 00000000..83e1460d --- /dev/null +++ b/megapixels/commands/cv/gen_face_vec.py @@ -0,0 +1,123 @@ +""" +Converts ROIs to face vector +""" + +import click + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg + +@click.command() +@click.option('-o', '--output', 'opt_fp_out', default=None, + help='Override enum output filename CSV') +@click.option('-m', '--media', 'opt_dir_media', default=None, + help='Override enum media directory') +@click.option('--data_store', 'opt_data_store', + type=cfg.DataStoreVar, + default=click_utils.get_default(types.DataStore.SSD), + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--dataset', 'opt_dataset', + type=cfg.DatasetVar, + required=True, + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--size', 'opt_size', + type=(int, int), 
default=(300, 300),
+  help='Output image size')
+@click.option('-j', '--jitters', 'opt_jitters', default=cfg.DLIB_FACEREC_JITTERS,
+  help='Number of jitters')
+@click.option('-p', '--padding', 'opt_padding', default=cfg.DLIB_FACEREC_PADDING,
+  help='Percentage padding')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+  help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+  help='Force overwrite file')
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+  help='GPU index')
+@click.pass_context
+def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
+  opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding):
+  """Converts face ROIs to vectors"""
+
+  import sys
+  import os
+  from os.path import join
+  from pathlib import Path
+  from glob import glob
+
+  from tqdm import tqdm
+  import numpy as np
+  import dlib  # must keep a local reference for dlib
+  import cv2 as cv
+  import pandas as pd
+
+  from app.models.bbox import BBox
+  from app.models.data_store import DataStore
+  from app.utils import logger_utils, file_utils, im_utils
+  from app.processors import face_recognition
+
+
+  # -------------------------------------------------
+  # init here
+
+  log = logger_utils.Logger.getLogger()
+  # set data_store
+  data_store = DataStore(opt_data_store, opt_dataset)
+
+  # get filepath out
+  fp_out = data_store.metadata(types.Metadata.FACE_VECTOR) if opt_fp_out is None else opt_fp_out
+  if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+    return
+
+  # init face processors
+  facerec = face_recognition.RecognitionDLIB()
+
+  # load data
+  df_file = pd.read_csv(data_store.metadata(types.Metadata.FILEPATH)).set_index('index')
+  df_roi = pd.read_csv(data_store.metadata(types.Metadata.FACE_ROI)).set_index('index')
+
+  if opt_slice:
+    df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+
+  # -------------------------------------------------
+  # process here
+  df_img_groups = df_roi.groupby('image_index')
+  log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+  vecs = []
+
+  for image_index, df_img_group in tqdm(df_img_groups):
+    # make fp
+    roi_index = df_img_group.index.values[0]
+    log.debug(f'roi_index: {roi_index}, image_index: {image_index}')
+    ds_file = df_file.loc[image_index]  # locate image meta by its image index
+
+    fp_im = data_store.face_image(str(ds_file.subdir), str(ds_file.fn), str(ds_file.ext))
+    im = cv.imread(fp_im)
+    # get bbox
+    x = df_img_group.x.values[0]
+    y = df_img_group.y.values[0]
+    w = df_img_group.w.values[0]
+    h = df_img_group.h.values[0]
+    imw = df_img_group.image_width.values[0]
+    imh = df_img_group.image_height.values[0]
+    # get face vector
+    dim = (imw, imh)
+    bbox_dim = BBox.from_xywh(x, y, w, h).to_dim(dim)  # convert to int real dimensions
+    # compute vec
+    # padding=opt_padding not yet implemented in dlib 19.16 but merged in master
+    vec = facerec.vec(im, bbox_dim, jitters=opt_jitters)
+    vec_str = ','.join([repr(x) for x in vec])  # convert to string for CSV
+    vecs.append({'roi_index': roi_index, 'image_index': image_index, 'vec': vec_str})
+
+
+  # save data
+  df = pd.DataFrame.from_dict(vecs)
+  df.index.name = 'index'
+  file_utils.mkdirs(fp_out)
+  df.to_csv(fp_out)
\ No newline at end of file
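Each face vector is flattened into a single CSV cell as a comma-joined string of repr() floats; Dataset.df_to_vec_list reverses this. A small round-trip sketch (column names as in the CSV above; values illustrative):

    import pandas as pd

    # serialize: one face vector -> one CSV cell
    vec = [0.12, -0.5, 0.033]
    row = {'roi_index': 0, 'image_index': 0, 'vec': ','.join([repr(x) for x in vec])}
    df = pd.DataFrame([row])
    df.index.name = 'index'

    # parse: CSV cell -> list of floats (mirrors Dataset.df_to_vec_list)
    parsed = [list(map(float, x.vec.split(','))) for x in df.itertuples()]
    assert parsed[0] == vec
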
diff --git a/megapixels/commands/cv/gen_pose.py b/megapixels/commands/cv/gen_pose.py
new file mode 100644
index 00000000..aefadb00
--- /dev/null
+++ b/megapixels/commands/cv/gen_pose.py
@@ -0,0 +1,141 @@
+"""
+Converts ROIs to pose: yaw, roll, pitch
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+  help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+  help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+  help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+  type=cfg.DataStoreVar,
+  default=click_utils.get_default(types.DataStore.SSD),
+  show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+  type=cfg.DatasetVar,
+  required=True,
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+  type=(int, int), default=(300, 300),
+  help='Output image size')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+  help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+  help='Force overwrite file')
+@click.option('-d', '--display', 'opt_display', is_flag=True,
+  help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
+  opt_slice, opt_force, opt_display):
+  """Converts ROIs to pose: roll, yaw, pitch"""
+
+  import sys
+  import os
+  from os.path import join
+  from pathlib import Path
+  from glob import glob
+
+  from tqdm import tqdm
+  import numpy as np
+  import dlib  # must keep a local reference for dlib
+  import cv2 as cv
+  import pandas as pd
+
+  from app.models.bbox import BBox
+  from app.utils import logger_utils, file_utils, im_utils
+  from app.processors.face_landmarks import LandmarksDLIB
+  from app.processors.face_pose import FacePoseDLIB
+  from app.models.data_store import DataStore
+
+  # -------------------------------------------------
+  # init here
+
+  log = logger_utils.Logger.getLogger()
+
+  # set data_store
+  data_store = DataStore(opt_data_store, opt_dataset)
+
+  # get filepath out
+  fp_out = data_store.metadata(types.Metadata.FACE_POSE) if opt_fp_out is None else opt_fp_out
+  if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+    return
+
+  # init face processors
+  face_pose = FacePoseDLIB()
+  face_landmarks = LandmarksDLIB()
+
+  # load filepath data
+  fp_filepath = data_store.metadata(types.Metadata.FILEPATH)
+  df_filepath = pd.read_csv(fp_filepath)
+  # load ROI data
+  fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+  df_roi = pd.read_csv(fp_roi)
+  # slice if you want
+  if opt_slice:
+    df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+  # group by image index (speedup if multiple faces per image)
+  df_img_groups = df_roi.groupby('image_index')
+  log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+  # store poses and convert to DataFrame
+  poses = []
+
+  # iterate
+  for image_index, df_img_group in tqdm(df_img_groups):
+    # make fp
+    ds_file = df_filepath.iloc[image_index]
+    fp_im = data_store.face_image(ds_file.subdir, ds_file.fn, ds_file.ext)
+    im = cv.imread(fp_im)
+    # get bbox
+    x = df_img_group.x.values[0]
+    y = df_img_group.y.values[0]
+    w = df_img_group.w.values[0]
+    h = df_img_group.h.values[0]
+    dim = im.shape[:2][::-1]
+    bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+    # get pose
+    landmarks = face_landmarks.landmarks(im, bbox)
+    pose_data = face_pose.pose(landmarks, dim, project_points=opt_display)
+    pose_degrees = pose_data['degrees']  # only keep the degrees data
+
+    # use the projected point data if display flag set
+    if opt_display:
+      pts_im = pose_data['points_image']
+      pts_model = pose_data['points_model']
+      pt_nose = pose_data['point_nose']
+      dst = im.copy()
+      face_pose.draw_pose(dst, pts_im, pts_model, pt_nose)
+      face_pose.draw_degrees(dst, pose_degrees)
+      # display to cv window
+      cv.imshow('', dst)
+      while True:
+        k = cv.waitKey(1) & 0xFF
+        if k == 27 or k == ord('q'):  # ESC
+          cv.destroyAllWindows()
+          sys.exit()
+        elif k != 255:
+          # any key to continue
+          break
+
+    # add image index and append to result CSV data
+    pose_degrees['image_index'] = image_index
+    poses.append(pose_degrees)
+
+
+  # save data
+  file_utils.mkdirs(fp_out)
+  df = pd.DataFrame.from_dict(poses)
+  df.index.name = 'index'
+  df.to_csv(fp_out)
\ No newline at end of file
diff --git a/megapixels/commands/cv/gen_rois.py b/megapixels/commands/cv/gen_rois.py
new file mode 100644
index 00000000..20dd598a
--- /dev/null
+++ b/megapixels/commands/cv/gen_rois.py
@@ -0,0 +1,172 @@
+"""
+Crop images to prepare for training
+"""
+
+import click
+# from PIL import Image, ImageOps, ImageFilter, ImageDraw
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+color_filters = {'color': 1, 'gray': 2, 'all': 3}
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+  help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+  help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+  help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+  type=cfg.DataStoreVar,
+  default=click_utils.get_default(types.DataStore.SSD),
+  show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+  type=cfg.DatasetVar,
+  required=True,
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+  type=(int, int), default=(300, 300),
+  help='Output image size')
+@click.option('-t', '--detector-type', 'opt_detector_type',
+  type=cfg.FaceDetectNetVar,
+  default=click_utils.get_default(types.FaceDetectNet.DLIB_CNN),
+  help=click_utils.show_help(types.FaceDetectNet))
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+  help='GPU index')
+@click.option('--conf', 'opt_conf_thresh', default=0.85, type=click.FloatRange(0,1),
+  help='Confidence minimum threshold')
+@click.option('-p', '--pyramids', 'opt_pyramids', default=0, type=click.IntRange(0,4),
+  help='Number pyramids to upscale for DLIB detectors')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+  help='Slice list of files')
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+  help='Display detections to debug')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+  help='Force overwrite file')
+@click.option('--color', 'opt_color_filter',
+  type=click.Choice(color_filters.keys()), default='all',
+  help='Filter to keep color or grayscale images (color = keep color)')
+@click.option('--largest/--all-faces', 'opt_largest', is_flag=True, default=True,
+  help='Only keep largest face')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset, opt_size, opt_detector_type,
+  opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_force, opt_color_filter,
+  opt_largest):
+  """Converts frames with faces to CSV of ROIs"""
+
+  import sys
+  import os
+  from os.path import join
+  from pathlib import Path
+  from glob import glob
+
+  from tqdm import tqdm
+  import numpy as np
+  import dlib  # must keep a local reference for dlib
+  import cv2 as cv
+  import pandas as pd
+
+  from app.utils import logger_utils, file_utils, im_utils
+  from app.processors import face_detector
+  from app.models.data_store import DataStore
+
+  # -------------------------------------------------
+  # init here
+
+  log = logger_utils.Logger.getLogger()
+
+  # set data_store
+  data_store = DataStore(opt_data_store, opt_dataset)
+
+  # get filepath out
+  fp_out = data_store.metadata(types.Metadata.FACE_ROI) if opt_fp_out is None else opt_fp_out
+  if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+    return
+
+  # set detector
+  if opt_detector_type == types.FaceDetectNet.CVDNN:
+    detector = face_detector.DetectorCVDNN()
+  elif opt_detector_type == types.FaceDetectNet.DLIB_CNN:
+    detector = face_detector.DetectorDLIBCNN(opt_gpu)
+  elif opt_detector_type == types.FaceDetectNet.DLIB_HOG:
+    detector = face_detector.DetectorDLIBHOG()
+  elif opt_detector_type == types.FaceDetectNet.MTCNN:
+    detector = face_detector.DetectorMTCNN()
+  elif opt_detector_type == types.FaceDetectNet.HAAR:
+    log.error('{} not yet implemented'.format(opt_detector_type.name))
+    return
+
+
+  # get list of files to process
+  fp_in = data_store.metadata(types.Metadata.FILEPATH) if opt_fp_in is None else opt_fp_in
+  df_files = pd.read_csv(fp_in).set_index('index')
+  if opt_slice:
+    df_files = df_files[opt_slice[0]:opt_slice[1]]
+  log.debug('processing {:,} files'.format(len(df_files)))
+
+  # filter out grayscale
+  color_filter = color_filters[opt_color_filter]
+
+  data = []
+
+  for df_file in tqdm(df_files.itertuples(), total=len(df_files)):
+    fp_im = data_store.face_image(str(df_file.subdir), str(df_file.fn), str(df_file.ext))
+    im = cv.imread(fp_im)
+
+    # filter out color or grayscale images
+    if color_filter != color_filters['all']:
+      try:
+        is_gray = im_utils.is_grayscale(im)
+        if is_gray and color_filter != color_filters['gray']:
+          log.debug('Skipping grayscale image: {}'.format(fp_im))
+          continue
+      except Exception as e:
+        log.error('Could not check grayscale: {}'.format(fp_im))
+        continue
+
+    try:
+      bboxes = detector.detect(im, size=opt_size, pyramids=opt_pyramids, largest=opt_largest)
+    except Exception as e:
+      log.error('could not detect: {}'.format(fp_im))
+      log.error('{}'.format(e))
+      continue
+
+    for bbox in bboxes:
+      roi = {
+        'image_index': int(df_file.Index),
+        'x': bbox.x,
+        'y': bbox.y,
+        'w': bbox.w,
+        'h': bbox.h,
+        'image_width': im.shape[1],
+        'image_height': im.shape[0]}
+      data.append(roi)
+
+    # debug display
+    if opt_display and len(bboxes):
+      im_md = im_utils.resize(im, width=min(1200, opt_size[0]))
+      for bbox in bboxes:
+        bbox_dim = bbox.to_dim(im_md.shape[:2][::-1])  # w,h
+        cv.rectangle(im_md, bbox_dim.pt_tl, bbox_dim.pt_br, (0,255,0), 3)
+      cv.imshow('', im_md)
+      while True:
+        k = cv.waitKey(1) & 0xFF
+        if k == 27 or k == ord('q'):  # ESC
+          cv.destroyAllWindows()
+          sys.exit()
+        elif k != 255:
+          # any key to continue
+          break
+
+  # save data
+  file_utils.mkdirs(fp_out)
+  df = pd.DataFrame.from_dict(data)
+  df.index.name = 'index'
+  df.to_csv(fp_out)
\ No newline at end of file
diff --git a/megapixels/commands/cv/rois_to_pose.py b/megapixels/commands/cv/rois_to_pose.py
deleted file mode 100644
index 3877cecf..00000000
--- a/megapixels/commands/cv/rois_to_pose.py
+++ /dev/null
@@ -1,127 +0,0 @@
-"""
-Converts ROIs to pose: yaw, roll, pitch
-"""
-
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_files', required=True,
-  help='Input ROI CSV')
-@click.option('-r', '--rois', 'opt_fp_rois', required=True,
-  help='Input ROI CSV')
-@click.option('-m', '--media', 'opt_dir_media', required=True,
-  help='Input media directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
-  help='Output CSV')
-@click.option('--size', 'opt_size',
-  type=(int, int), default=(300, 300),
-  help='Output image size') 
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice list of files') -@click.option('-f', '--force', 'opt_force', is_flag=True, - help='Force overwrite file') -@click.option('-d', '--display', 'opt_display', is_flag=True, - help='Display image for debugging') -@click.pass_context -def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size, - opt_slice, opt_force, opt_display): - """Converts ROIs to pose: roll, yaw, pitch""" - - import sys - import os - from os.path import join - from pathlib import Path - from glob import glob - - from tqdm import tqdm - import numpy as np - import dlib # must keep a local reference for dlib - import cv2 as cv - import pandas as pd - - from app.models.bbox import BBox - from app.utils import logger_utils, file_utils, im_utils - from app.processors.face_landmarks import LandmarksDLIB - from app.processors.face_pose import FacePoseDLIB - - # ------------------------------------------------- - # init here - - log = logger_utils.Logger.getLogger() - - # init face processors - face_pose = FacePoseDLIB() - face_landmarks = LandmarksDLIB() - - # load datra - df_files = pd.read_csv(opt_fp_files) - df_rois = pd.read_csv(opt_fp_rois) - - if not opt_force and Path(opt_fp_out).exists(): - log.error('File exists. Use "-f / --force" to overwite') - return - - if opt_slice: - df_rois = df_rois[opt_slice[0]:opt_slice[1]] - - # ------------------------------------------------- - # process here - df_img_groups = df_rois.groupby('image_index') - log.debug('processing {:,} groups'.format(len(df_img_groups))) - - - poses = [] - - # iterate - #for df_roi_group_idx, df_roi_group in tqdm(df_roi_groups): - for image_index, df_img_group in tqdm(df_img_groups): - # make fp - #image_index = df_roi_group.image_index.values[0] - pds_file = df_files.iloc[image_index] - fp_im = join(opt_dir_media, pds_file.subdir, '{}.{}'.format(pds_file.fn, pds_file.ext)) - im = cv.imread(fp_im) - # get bbox - x = df_img_group.x.values[0] - y = df_img_group.y.values[0] - w = df_img_group.w.values[0] - h = df_img_group.h.values[0] - dim = im.shape[:2][::-1] - bbox = BBox.from_xywh(x, y, w, h).to_dim(dim) - # get pose - landmarks = face_landmarks.landmarks(im, bbox) - pose_data = face_pose.pose(landmarks, dim, project_points=opt_display) - pose_degrees = pose_data['degrees'] # only keep the degrees data - - # use the project point data if display flag set - if opt_display: - pts_im = pose_data['points_image'] - pts_model = pose_data['points_model'] - pt_nose = pose_data['point_nose'] - dst = im.copy() - face_pose.draw_pose(dst, pts_im, pts_model, pt_nose) - face_pose.draw_degrees(dst, pose_degrees) - # display to cv window - cv.imshow('', dst) - while True: - k = cv.waitKey(1) & 0xFF - if k == 27 or k == ord('q'): # ESC - cv.destroyAllWindows() - sys.exit() - elif k != 255: - # any key to continue - break - - # add image index and append to result CSV data - pose_degrees['image_index'] = image_index - poses.append(pose_degrees) - - - # save date - file_utils.mkdirs(opt_fp_out) - df = pd.DataFrame.from_dict(poses) - df.index.name = 'index' - df.to_csv(opt_fp_out) \ No newline at end of file diff --git a/megapixels/commands/cv/rois_to_vecs.py b/megapixels/commands/cv/rois_to_vecs.py deleted file mode 100644 index 525f4404..00000000 --- a/megapixels/commands/cv/rois_to_vecs.py +++ /dev/null @@ -1,109 +0,0 @@ -""" -Converts ROIs to face vector -""" - -import click - -from app.settings import types -from app.utils import click_utils -from app.settings 
import app_cfg as cfg - -@click.command() -@click.option('-i', '--input', 'opt_fp_files', required=True, - help='Input file meta CSV') -@click.option('-r', '--rois', 'opt_fp_rois', required=True, - help='Input ROI CSV') -@click.option('-m', '--media', 'opt_dir_media', required=True, - help='Input media directory') -@click.option('-o', '--output', 'opt_fp_out', required=True, - help='Output CSV') -@click.option('--size', 'opt_size', - type=(int, int), default=(300, 300), - help='Output image size') -@click.option('-j', '--jitters', 'opt_jitters', default=cfg.DLIB_FACEREC_JITTERS, - help='Number of jitters') -@click.option('-p', '--padding', 'opt_padding', default=cfg.DLIB_FACEREC_PADDING, - help='Percentage padding') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice list of files') -@click.option('-f', '--force', 'opt_force', is_flag=True, - help='Force overwrite file') -@click.option('-g', '--gpu', 'opt_gpu', default=0, - help='GPU index') -@click.pass_context -def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size, - opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding): - """Converts face ROIs to vectors""" - - import sys - import os - from os.path import join - from pathlib import Path - from glob import glob - - from tqdm import tqdm - import numpy as np - import dlib # must keep a local reference for dlib - import cv2 as cv - import pandas as pd - - from app.models.bbox import BBox - from app.utils import logger_utils, file_utils, im_utils - from app.processors import face_recognition - - - # ------------------------------------------------- - # init here - - log = logger_utils.Logger.getLogger() - - # init face processors - facerec = face_recognition.RecognitionDLIB() - - # load data - df_file_meta = pd.read_csv(opt_fp_files) - df_rois = pd.read_csv(opt_fp_rois) - - if not opt_force and Path(opt_fp_out).exists(): - log.error('File exists. 
Use "-f / --force" to overwite') - return - - if opt_slice: - df_rois = df_rois[opt_slice[0]:opt_slice[1]] - - # ------------------------------------------------- - # process here - df_img_groups = df_rois.groupby('image_index') - log.debug('processing {:,} groups'.format(len(df_img_groups))) - - vecs = [] - - for image_index, df_img_group in tqdm(df_img_groups): - # make fp - roi_index = df_img_group.index.values[0] - file_meta = df_file_meta.iloc[image_index] # locate image meta - fp_im = join(opt_dir_media, file_meta.subdir, '{}.{}'.format(file_meta.fn, file_meta.ext)) - im = cv.imread(fp_im) - # get bbox - x = df_img_group.x.values[0] - y = df_img_group.y.values[0] - w = df_img_group.w.values[0] - h = df_img_group.h.values[0] - imw = df_img_group.image_width.values[0] - imh = df_img_group.image_height.values[0] - dim = im.shape[:2][::-1] - # get face vector - dim = (imw, imh) - bbox_dim = BBox.from_xywh(x, y, w, h).to_dim(dim) # convert to int real dimensions - # compute vec - # padding=opt_padding not yet implemented in 19.16 but merged in master - vec = facerec.vec(im, bbox_dim, jitters=opt_jitters) - vec_str = ','.join([repr(x) for x in vec]) # convert to string for CSV - vecs.append( {'roi_index': roi_index, 'image_index': image_index, 'vec': vec_str}) - - - # save date - file_utils.mkdirs(opt_fp_out) - df = pd.DataFrame.from_dict(vecs) - df.index.name = 'index' - df.to_csv(opt_fp_out) \ No newline at end of file diff --git a/megapixels/commands/datasets/add_uuid.py b/megapixels/commands/datasets/add_uuid.py deleted file mode 100644 index 9c14c0e3..00000000 --- a/megapixels/commands/datasets/add_uuid.py +++ /dev/null @@ -1,44 +0,0 @@ -import click - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg -from app.utils.logger_utils import Logger - -log = Logger.getLogger() - -@click.command() -@click.option('-i', '--input', 'opt_fp_in', required=True, - help='Input directory') -@click.option('-o', '--output', 'opt_fp_out', - help='Output directory') -@click.option('-f', '--force', 'opt_force', is_flag=True, - help='Force overwrite file') -@click.pass_context -def cli(ctx, opt_fp_in, opt_fp_out, opt_force): - """Appends UUID to records CSV""" - - from glob import glob - from os.path import join - from pathlib import Path - import base64 - import uuid - - from tqdm import tqdm - import pandas as pd - - if not opt_force and Path(opt_fp_out).exists(): - log.error('File exists. 
Use "-f / --force" to overwite') - return - - # load names - df_records = pd.read_csv(opt_fp_in) - records = df_records.to_dict('index') - # append a UUID to every entry - for idx, item in records.items(): - records[idx]['uuid'] = uuid.uuid4() - # save to csv - df_uuid = pd.DataFrame.from_dict(list(records.values())) # ignore the indices - df_uuid.to_csv(opt_fp_out, index=False) - - log.info('done') \ No newline at end of file diff --git a/megapixels/commands/datasets/file_meta.py b/megapixels/commands/datasets/file_meta.py deleted file mode 100644 index e1456f44..00000000 --- a/megapixels/commands/datasets/file_meta.py +++ /dev/null @@ -1,84 +0,0 @@ -""" -Begin with this file to process folder of images -- Converts folders and subdirectories into CSV with file attributes split -""" -import click - -from app.settings import types -from app.utils import click_utils -from app.settings import app_cfg as cfg -from app.utils.logger_utils import Logger - -log = Logger.getLogger() - -@click.command() -@click.option('-i', '--input', 'opt_fp_in', required=True, - help='Input directory') -@click.option('-o', '--output', 'opt_fp_out', required=True, - help='Output file for file meta CSV') -@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), - help='Slice list of files') -@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False, - help='Use glob recursion (slower)') -@click.option('-t', '--threads', 'opt_threads', default=4, - help='Number of threads') -@click.option('-f', '--force', 'opt_force', is_flag=True, - help='Force overwrite file') -@click.pass_context -def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_recursive, opt_threads, opt_force): - """Multithreading test""" - - from glob import glob - from os.path import join - from pathlib import Path - import time - from multiprocessing.dummy import Pool as ThreadPool - import random - - import pandas as pd - from tqdm import tqdm - from glob import glob - - from app.utils import file_utils, im_utils - - - if not opt_force and Path(opt_fp_out).exists(): - log.error('File exists. Use "-f / --force" to overwite') - return - - fp_ims = [] - log.info(f'Globbing {opt_fp_in}') - for ext in ['jpg', 'png']: - if opt_recursive: - fp_glob = join(opt_fp_in, '**/*.{}'.format(ext)) - fp_ims += glob(fp_glob, recursive=True) - else: - fp_glob = join(opt_fp_in, '*.{}'.format(ext)) - fp_ims += glob(fp_glob) - - if not fp_ims: - log.warn('No images. 
Try with "--recursive"') - return - - if opt_slice: - fp_ims = fp_ims[opt_slice[0]:opt_slice[1]] - - log.info('Processing {:,} images'.format(len(fp_ims))) - - - # convert data to dict - data = [] - for i, fp_im in enumerate(tqdm(fp_ims)): - fpp_im = Path(fp_im) - subdir = str(fpp_im.parent.relative_to(opt_fp_in)) - data.append( { - 'subdir': subdir, - 'fn': fpp_im.stem, - 'ext': fpp_im.suffix.replace('.','') - }) - - # save to CSV - file_utils.mkdirs(opt_fp_out) - df = pd.DataFrame.from_dict(data) - df.index.name = 'index' - df.to_csv(opt_fp_out) \ No newline at end of file diff --git a/megapixels/commands/datasets/filter_by_pose.py b/megapixels/commands/datasets/filter_by_pose.py new file mode 100644 index 00000000..6fdbef98 --- /dev/null +++ b/megapixels/commands/datasets/filter_by_pose.py @@ -0,0 +1,101 @@ +import click + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg +from app.utils.logger_utils import Logger + +log = Logger.getLogger() + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', default=None, + help='Override enum input filename CSV') +@click.option('-o', '--output', 'opt_fp_out', default=None, + help='Override enum output filename CSV') +@click.option('--data_store', 'opt_data_store', + type=cfg.DataStoreVar, + default=click_utils.get_default(types.DataStore.SSD), + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--dataset', 'opt_dataset', + type=cfg.DatasetVar, + required=True, + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--yaw', 'opt_yaw', type=(float, float), default=cfg.POSE_MINMAX_YAW, + help='Yaw (min, max)') +@click.option('--roll', 'opt_roll', type=(float, float), default=cfg.POSE_MINMAX_ROLL, + help='Roll (min, max)') +@click.option('--pitch', 'opt_pitch', type=(float, float), default=cfg.POSE_MINMAX_PITCH, + help='Pitch (min, max)') +@click.pass_context +def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_yaw, opt_roll, opt_pitch): + """Filter out exaggerated poses""" + + import sys + from os.path import join + from pathlib import Path + import shutil + from datetime import datetime + + import pandas as pd + from tqdm import tqdm + + from app.models.data_store import DataStore + from app.utils import file_utils + + # create date store + data_store = DataStore(opt_data_store, opt_dataset) + # load pose + fp_pose = data_store.metadata(types.Metadata.FACE_POSE) + df_pose = pd.read_csv(fp_pose).set_index('index') + # load roi + fp_roi = data_store.metadata(types.Metadata.FACE_ROI) + df_roi = pd.read_csv(fp_roi).set_index('index') + # load filepath + fp_filepath = data_store.metadata(types.Metadata.FILEPATH) + df_filepath = pd.read_csv(fp_filepath).set_index('index') + # load uuid + fp_uuid= data_store.metadata(types.Metadata.UUID) + df_uuid = pd.read_csv(fp_uuid).set_index('index') + # load sha256 index + fp_sha256 = data_store.metadata(types.Metadata.SHA256) + df_sha256 = pd.read_csv(fp_sha256).set_index('index') + # debug + log.info('Processing {:,} rows'.format(len(df_pose))) + n_rows = len(df_pose) + + # filter out extreme poses + invalid_indices = [] + for ds_pose in tqdm(df_pose.itertuples(), total=len(df_pose)): + if ds_pose.yaw < opt_yaw[0] or ds_pose.yaw > opt_yaw[1] \ + and ds_pose.roll < opt_roll[0] or ds_pose.roll > opt_roll[1] \ + and ds_pose.pitch < opt_pitch[0] or ds_pose.pitch > opt_pitch[1]: + invalid_indices.append(ds_pose.Index) # unique file indexs + + # filter out valid/invalid + 
diff --git a/megapixels/commands/datasets/filter_by_pose.py b/megapixels/commands/datasets/filter_by_pose.py
new file mode 100644
index 00000000..6fdbef98
--- /dev/null
+++ b/megapixels/commands/datasets/filter_by_pose.py
@@ -0,0 +1,101 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+  help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+  help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+  type=cfg.DataStoreVar,
+  default=click_utils.get_default(types.DataStore.SSD),
+  show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+  type=cfg.DatasetVar,
+  required=True,
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('--yaw', 'opt_yaw', type=(float, float), default=cfg.POSE_MINMAX_YAW,
+  help='Yaw (min, max)')
+@click.option('--roll', 'opt_roll', type=(float, float), default=cfg.POSE_MINMAX_ROLL,
+  help='Roll (min, max)')
+@click.option('--pitch', 'opt_pitch', type=(float, float), default=cfg.POSE_MINMAX_PITCH,
+  help='Pitch (min, max)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_yaw, opt_roll, opt_pitch):
+  """Filter out exaggerated poses"""
+
+  import sys
+  from os.path import join
+  from pathlib import Path
+  import shutil
+  from datetime import datetime
+
+  import pandas as pd
+  from tqdm import tqdm
+
+  from app.models.data_store import DataStore
+  from app.utils import file_utils
+
+  # create data store
+  data_store = DataStore(opt_data_store, opt_dataset)
+  # load pose
+  fp_pose = data_store.metadata(types.Metadata.FACE_POSE)
+  df_pose = pd.read_csv(fp_pose).set_index('index')
+  # load roi
+  fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+  df_roi = pd.read_csv(fp_roi).set_index('index')
+  # load filepath
+  fp_filepath = data_store.metadata(types.Metadata.FILEPATH)
+  df_filepath = pd.read_csv(fp_filepath).set_index('index')
+  # load uuid
+  fp_uuid = data_store.metadata(types.Metadata.UUID)
+  df_uuid = pd.read_csv(fp_uuid).set_index('index')
+  # load sha256 index
+  fp_sha256 = data_store.metadata(types.Metadata.SHA256)
+  df_sha256 = pd.read_csv(fp_sha256).set_index('index')
+  # debug
+  log.info('Processing {:,} rows'.format(len(df_pose)))
+  n_rows = len(df_pose)
+
+  # filter out extreme poses: invalid if any angle is outside its (min, max)
+  invalid_indices = []
+  for ds_pose in tqdm(df_pose.itertuples(), total=len(df_pose)):
+    if (ds_pose.yaw < opt_yaw[0] or ds_pose.yaw > opt_yaw[1]) \
+      or (ds_pose.roll < opt_roll[0] or ds_pose.roll > opt_roll[1]) \
+      or (ds_pose.pitch < opt_pitch[0] or ds_pose.pitch > opt_pitch[1]):
+      invalid_indices.append(ds_pose.Index)  # unique file indexes
+
+  # drop invalid rows from all metadata tables
+  log.debug(f'first invalid indices: {invalid_indices[:20]}')
+  log.info(f'Removing {len(invalid_indices)} invalid indices...')
+  df_filepath = df_filepath.drop(df_pose.index[invalid_indices])
+  df_sha256 = df_sha256.drop(df_pose.index[invalid_indices])
+  df_uuid = df_uuid.drop(df_pose.index[invalid_indices])
+  df_roi = df_roi.drop(df_pose.index[invalid_indices])
+  df_pose = df_pose.drop(df_pose.index[invalid_indices])
+  log.info(f'Removed {n_rows - len(df_pose)} rows')
+
+  # move files to make a backup
+  dir_bkup = join(Path(fp_pose).parent, f'backup_{datetime.now():%Y%m%d_%H%M%S}')
+  file_utils.mkdirs(dir_bkup)
+  # move files to backup
+  shutil.move(fp_filepath, join(dir_bkup, Path(fp_filepath).name))
+  shutil.move(fp_sha256, join(dir_bkup, Path(fp_sha256).name))
+  shutil.move(fp_uuid, join(dir_bkup, Path(fp_uuid).name))
+  shutil.move(fp_roi, join(dir_bkup, Path(fp_roi).name))
+  shutil.move(fp_pose, join(dir_bkup, Path(fp_pose).name))
+  # save filtered poses
+  df_filepath.to_csv(fp_filepath)
+  df_sha256.to_csv(fp_sha256)
+  df_uuid.to_csv(fp_uuid)
+  df_roi.to_csv(fp_roi)
+  df_pose.to_csv(fp_pose)
+
diff --git a/megapixels/commands/datasets/filter_poses.py b/megapixels/commands/datasets/filter_poses.py
deleted file mode 100644
index 304eeff2..00000000
--- a/megapixels/commands/datasets/filter_poses.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-from app.utils.logger_utils import Logger
-
-log = Logger.getLogger()
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
-  help='Input directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
-  help='Output directory')
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
-  help='Slice list of files')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
-  help='Force overwrite file')
-@click.option('--yaw', 'opt_yaw', type=(float, float), default=(-25,25),
-  help='Yaw (min, max)')
-@click.option('--roll', 'opt_roll', type=(float, float), default=(-15,15),
-  help='Roll (min, max)')
-@click.option('--pitch', 'opt_pitch', type=(float, float), default=(-10,10),
-  help='Pitch (min, max)')
-@click.option('--drop', 'opt_drop', type=click.Choice(['valid', 'invalid']), default='invalid',
-  help='Drop valid or invalid poses')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_yaw, opt_roll, opt_pitch,
-  opt_drop, opt_force):
-  """Filter out exaggerated poses"""
-
-  from glob import glob
-  from os.path import join
-  from pathlib import Path
-  import time
-  from multiprocessing.dummy import Pool as ThreadPool
-  import random
-
-  import pandas as pd
-  from tqdm import tqdm
-  from glob import glob
-
-  from app.utils import file_utils, im_utils
-
-
-  if not opt_force and Path(opt_fp_out).exists():
-    log.error('File exists. Use "-f / --force" to overwite')
-    return
-
-  df_poses = pd.read_csv(opt_fp_in).set_index('index')
-
-  if opt_slice:
-    df_poses = df_poses[opt_slice[0]:opt_slice[1]]
-
-  log.info('Processing {:,} rows'.format(len(df_poses)))
-
-  # extend a new temporary column
-  df_poses['valid'] = [0] * len(df_poses)
-
-  # filter out extreme poses
-  for ds_pose in tqdm(df_poses.itertuples(), total=len(df_poses)):
-    if ds_pose.yaw > opt_yaw[0] and ds_pose.yaw < opt_yaw[1] \
-      and ds_pose.roll > opt_roll[0] and ds_pose.roll < opt_roll[1] \
-      and ds_pose.pitch > opt_pitch[0] and ds_pose.pitch < opt_pitch[1]:
-      df_poses.at[ds_pose.Index, 'valid'] = 1
-
-  # filter out valid/invalid
-  drop_val = 0 if opt_drop == 'valid' else 0 # drop 0's if drop == valid, else drop 1's
-  df_poses_filtered = df_poses.drop(df_poses[df_poses.valid == int()].index, axis=0)
-
-  # drop temp column
-  df_poses_filtered = df_poses_filtered.drop('valid', axis=1)
-
-  # save filtered poses
-  df_poses_filtered.to_csv(opt_fp_out)
-  log.info('Saved {:,} rows'.format(len(df_poses_filtered)))
\ No newline at end of file
diff --git a/megapixels/commands/datasets/gen_filepath.py b/megapixels/commands/datasets/gen_filepath.py
new file mode 100644
index 00000000..e06fee6b
--- /dev/null
+++ b/megapixels/commands/datasets/gen_filepath.py
@@ -0,0 +1,102 @@
+"""
+Begin with this file to process a folder of images
+- Converts folders and subdirectories into CSV with file attributes split
+"""
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in',
+  help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out',
+  help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+  type=cfg.DataStoreVar,
+  default=click_utils.get_default(types.DataStore.NAS),
+  show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+  type=cfg.DatasetVar,
+  required=True,
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+  help='Slice list of files')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+  help='Use glob recursion (slower)')
+@click.option('-t', '--threads', 'opt_threads', default=4,
+  help='Number of threads')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+  help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_slice,
+  opt_recursive, opt_threads, opt_force):
+  """Generates the filepath metadata CSV"""
+
+  from glob import glob
+  from os.path import join
+  from pathlib import Path
+  import time
+  from multiprocessing.dummy import Pool as ThreadPool
+  import random
+
+  import pandas as pd
+  from tqdm import tqdm
+
+  from app.models.data_store import DataStore
+  from app.utils import file_utils, im_utils
+
+  data_store = DataStore(opt_data_store, opt_dataset)
+  fp_out = opt_fp_out if opt_fp_out is not None else data_store.metadata(types.Metadata.FILEPATH)
+  if not opt_force and Path(fp_out).exists():
+    log.error('File exists. 
Use "-f / --force" to overwite') + return + + + # glob files + fp_in = opt_fp_in if opt_fp_in is not None else data_store.media_images_original() + fp_ims = [] + log.info(f'Globbing {fp_in}') + for ext in ['jpg', 'png']: + if opt_recursive: + fp_glob = join(fp_in, '**/*.{}'.format(ext)) + fp_ims += glob(fp_glob, recursive=True) + else: + fp_glob = join(fp_in, '*.{}'.format(ext)) + fp_ims += glob(fp_glob) + + if not fp_ims: + log.warn('No images. Try with "--recursive"') + return + + if opt_slice: + fp_ims = fp_ims[opt_slice[0]:opt_slice[1]] + + log.info('Found {:,} images'.format(len(fp_ims))) + + + # convert data to dict + data = [] + for i, fp_im in enumerate(tqdm(fp_ims)): + fpp_im = Path(fp_im) + subdir = str(fpp_im.parent.relative_to(fp_in)) + data.append( { + 'subdir': subdir, + 'fn': fpp_im.stem, + 'ext': fpp_im.suffix.replace('.','') + }) + + # save to CSV + file_utils.mkdirs(fp_out) + df_filepath = pd.DataFrame.from_dict(data) + df_filepath = df_filepath.sort_values(by=['subdir'], ascending=True) + df_filepath = df_filepath.reset_index(drop=True) + df_filepath.index.name = 'index' + df_filepath.to_csv(fp_out) \ No newline at end of file diff --git a/megapixels/commands/datasets/gen_sha256.py b/megapixels/commands/datasets/gen_sha256.py new file mode 100644 index 00000000..1616eebf --- /dev/null +++ b/megapixels/commands/datasets/gen_sha256.py @@ -0,0 +1,152 @@ +''' + +''' +import click + +from app.settings import types +from app.utils import click_utils +from app.settings import app_cfg as cfg +from app.utils.logger_utils import Logger + +log = Logger.getLogger() + +identity_sources = ['subdir', 'subdir_head', 'subdir_tail'] + +@click.command() +@click.option('-i', '--input', 'opt_fp_in', default=None, + help='Override enum input filename CSV') +@click.option('-o', '--output', 'opt_fp_out', default=None, + help='Override enum output filename CSV') +@click.option('-m', '--media', 'opt_dir_media', default=None, + help='Override enum media directory') +@click.option('--data_store', 'opt_data_store', + type=cfg.DataStoreVar, + default=click_utils.get_default(types.DataStore.NAS), + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--dataset', 'opt_dataset', + type=cfg.DatasetVar, + required=True, + show_default=True, + help=click_utils.show_help(types.Dataset)) +@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), + help='Slice list of files') +@click.option('-t', '--threads', 'opt_threads', default=12, + help='Number of threads') +@click.option('-f', '--force', 'opt_force', is_flag=True, + help='Force overwrite file') +@click.option('--identity', 'opt_identity', default='subdir_tail', type=click.Choice(identity_sources), + help='Identity source, blank for no identity') +@click.pass_context +def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media, opt_slice, opt_threads, + opt_identity, opt_force): + """Generates sha256/identity index CSV file""" + + import sys + from glob import glob + from os.path import join + from pathlib import Path + import time + from multiprocessing.dummy import Pool as ThreadPool + import random + + import pandas as pd + from tqdm import tqdm + from glob import glob + + from app.models import DataStore + from app.utils import file_utils, im_utils + + + # set data_store + data_store = DataStore(opt_data_store, opt_dataset) + # get filepath out + fp_out = data_store.metadata(types.Metadata.SHA256) if opt_fp_out is None else opt_fp_out + # exit if exists + if not opt_force and 
+
+  # prepare list of images to multithread into sha256s
+  dir_media = data_store.media_images_original() if opt_dir_media is None else opt_dir_media
+  file_objs = []
+  for ds_file in df_files.itertuples():
+    fp_im = join(dir_media, str(ds_file.subdir), f"{ds_file.fn}.{ds_file.ext}")
+    # keep the image index so results can be joined back to the filepath CSV
+    file_obj = {'fp': fp_im, 'index': ds_file.Index}
+    if opt_identity:
+      subdirs = str(ds_file.subdir).split('/')
+      if not subdirs[0]:
+        log.error(f'Could not split subdir: "{ds_file.subdir}". Try a different option for "--identity"')
+        log.error('exiting')
+        return
+      if opt_identity == 'subdir':
+        # use the full subdir path
+        subdir = ds_file.subdir
+      elif opt_identity == 'subdir_head':
+        # use first part of subdir path
+        subdir = subdirs[0]
+      elif opt_identity == 'subdir_tail':
+        # use last part of subdir path
+        subdir = subdirs[-1]
+      file_obj['identity_subdir'] = subdir
+    file_objs.append(file_obj)
+
+  def as_sha256(file_obj):
+    pbar.update(1)
+    file_obj['sha256'] = file_utils.sha256(file_obj['fp'])
+    return file_obj
+
+  # hash files in a multithread pool
+  st = time.time()
+  pool = ThreadPool(opt_threads)
+  with tqdm(total=len(file_objs)) as pbar:
+    pool_file_objs = pool.map(as_sha256, file_objs)
+
+  # convert data to dict
+  data = []
+  for pool_file_obj in pool_file_objs:
+    data.append( {
+      'sha256': pool_file_obj['sha256'],
+      'index': pool_file_obj['index'],
+      'identity_subdir': pool_file_obj.get('identity_subdir', ''),
+    })
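+
+  # Sketch of the grouping below (values are hypothetical): after sorting by
+  # identity_subdir, every row sharing a subdir gets the same identity_index:
+  #   identity_subdir='person_a' -> identity_index=0
+  #   identity_subdir='person_b' -> identity_index=1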
+
+  # sort by identity_subdir, then assign an identity_index per unique subdir
+  df_sha256 = pd.DataFrame.from_dict(data)
+  # add placeholder column for identity
+  df_sha256['identity_index'] = [0] * len(df_sha256)
+  df_sha256 = df_sha256.sort_values(by=['identity_subdir'], ascending=True)
+  df_sha256_identity_groups = df_sha256.groupby('identity_subdir')
+  for identity_index, df_sha256_identity_group_tuple in enumerate(df_sha256_identity_groups):
+    identity_subdir, df_sha256_identity_group = df_sha256_identity_group_tuple
+    for ds_sha256 in df_sha256_identity_group.itertuples():
+      df_sha256.at[ds_sha256.Index, 'identity_index'] = identity_index
+  # drop temp identity subdir column
+  df_sha256 = df_sha256.drop('identity_subdir', axis=1)
+  # sort rows back by original index and write to CSV
+  log.info(f'rows: {len(df_sha256)}')
+  file_utils.mkdirs(fp_out)
+  df_sha256 = df_sha256.sort_values(['index'], ascending=[True])
+  df_sha256.to_csv(fp_out, index=False)
+
+  # timing
+  log.info(f'wrote file: {fp_out}')
+  log.info('time: {:.2f}, threads: {}'.format(time.time() - st, opt_threads))
\ No newline at end of file
diff --git a/megapixels/commands/datasets/gen_uuid.py b/megapixels/commands/datasets/gen_uuid.py
new file mode 100644
index 00000000..612c43ee
--- /dev/null
+++ b/megapixels/commands/datasets/gen_uuid.py
@@ -0,0 +1,65 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+  help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+  help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+  type=cfg.DataStoreVar,
+  default=click_utils.get_default(types.DataStore.NAS),
+  show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+  type=cfg.DatasetVar,
+  required=True,
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+  help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_force):
+  """Appends UUID to records CSV"""
+
+  from pathlib import Path
+  import uuid
+
+  import pandas as pd
+
+  from app.models import DataStore
+
+  # set data_store
+  data_store = DataStore(opt_data_store, opt_dataset)
+  # get filepath out
+  fp_out = data_store.metadata(types.Metadata.UUID) if opt_fp_out is None else opt_fp_out
+  # exit if exists
+  if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+    return
+
+  # load sha256 records
+  fp_in = data_store.metadata(types.Metadata.SHA256) if opt_fp_in is None else opt_fp_in
+  log.info(f'Loading: {fp_in}')
+  df_records = pd.read_csv(fp_in).set_index('index')
+
+  df_uuids = df_records.copy()
+  # generate one fresh UUID per record
+  df_uuids['uuid'] = [uuid.uuid4() for _ in range(len(df_uuids))]
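+
+  # The CSV written below keeps only the record index and its UUID, e.g.
+  # (hypothetical values):
+  #   index,uuid
+  #   0,8c1b5e64-2f3a-4d8e-9c7b-1a2b3c4d5e6f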
+
+  df_uuids = df_uuids.drop(['sha256', 'identity_index'], axis=1)
+  df_uuids.to_csv(fp_out)
\ No newline at end of file
diff --git a/megapixels/commands/datasets/lookup.py b/megapixels/commands/datasets/lookup.py
index e84bdf3e..5a2a171e 100644
--- a/megapixels/commands/datasets/lookup.py
+++ b/megapixels/commands/datasets/lookup.py
@@ -6,8 +6,10 @@ from app.utils import click_utils
 from app.settings import app_cfg as cfg
 from app.utils.logger_utils import Logger
 
+log = Logger.getLogger()
+
 @click.command()
-@click.option('--index', 'opt_index', type=int,
+@click.option('--index', 'opt_index', type=int, required=True,
   help='Vector index to lookup')
 @click.option('--data_store', 'opt_data_store',
   type=cfg.DataStoreVar,
@@ -19,12 +21,8 @@
   required=True,
   show_default=True,
   help=click_utils.show_help(types.Dataset))
-@click.option('--metadata', 'opt_metadata_type', required=True,
-  type=cfg.MetadataVar,
-  show_default=True,
-  help=click_utils.show_help(types.Metadata))
 @click.pass_context
-def cli(ctx, opt_index, opt_data_store, opt_dataset, opt_metadata_type):
+def cli(ctx, opt_index, opt_data_store, opt_dataset):
   """Display image info"""
 
   import sys
@@ -37,22 +35,20 @@ def cli(ctx, opt_index, opt_data_store, opt_dataset, opt_metadata_type):
   import cv2 as cv
   from tqdm import tqdm
 
-  from app.utils import file_utils, im_utils, path_utils
+  from app.utils import file_utils, im_utils
+  from app.models.data_store import DataStore
 
   log = Logger.getLogger()
 
-
-  log.info(f'creating dataset: {opt_dataset}')
-  dataset = Dataset(opt_dataset)
-  # loads all CSV files, may take a while
-  log.info(f'loading dataset...')
-  dataset.load(opt_data_store)
+  # init dataset; metadata CSVs are loaded on init
+  dataset = Dataset(opt_data_store, opt_dataset)
   # find image records
   image_record = dataset.roi_idx_to_record(opt_index)
   # debug
   image_record.summarize()
   # load image
-  fp_im = image_record.filepath
-  im = cv.imread(fp_im)
+  im = cv.imread(image_record.filepath)
   # display
   cv.imshow('', im)
   # cv gui
diff --git a/megapixels/commands/datasets/sha256.py b/megapixels/commands/datasets/sha256.py
deleted file mode 100644
index 4c734073..00000000
--- a/megapixels/commands/datasets/sha256.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-from app.utils.logger_utils import Logger
-
-log = Logger.getLogger()
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
-  help='Input directory')
-@click.option('-m', '--media', 'opt_dir_media', required=True,
-  help='Input media directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
-  help='Output directory')
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
-  help='Slice list of files')
-@click.option('-t', '--threads', 'opt_threads', default=4,
-  help='Number of threads')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
-  help='Force overwrite file')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_slice, opt_threads, opt_force):
-  """Multithreading test"""
-
-  from glob import glob
-  from os.path import join
-  from pathlib import Path
-  import time
-  from multiprocessing.dummy import Pool as ThreadPool
-  import random
-
-  import pandas as pd
-  from tqdm import tqdm
-  from glob import glob
-
-  from app.utils import file_utils, im_utils
-
-
-  if not opt_force and Path(opt_fp_out).exists():
-    log.error('File exists. Use "-f / --force" to overwite')
-    return
-
-  df_files = pd.read_csv(opt_fp_in).set_index('index')
-
-  if opt_slice:
-    df_files = df_files[opt_slice[0]:opt_slice[1]]
-
-  log.info('Processing {:,} images'.format(len(df_files)))
-
-
-  # prepare list of images to multithread into sha256s
-  file_objs = []
-  for ds_file in df_files.itertuples():
-    fp_im = join(opt_dir_media, str(ds_file.subdir), f"{ds_file.fn}.{ds_file.ext}")
-    file_objs.append({'fp': fp_im, 'index': ds_file.Index})
-
-  # convert to thread pool
-  pbar = tqdm(total=len(file_objs))
-
-  def as_sha256(file_obj):
-    pbar.update(1)
-    file_obj['sha256'] = file_utils.sha256(file_obj['fp'])
-    return file_obj
-
-  # multithread pool
-  pool_file_objs = []
-  st = time.time()
-  pool = ThreadPool(opt_threads)
-  with tqdm(total=len(file_objs)) as pbar:
-    pool_file_objs = pool.map(as_sha256, file_objs)
-  pbar.close()
-
-  # convert data to dict
-  data = []
-  for pool_file_obj in pool_file_objs:
-    data.append( {
-      'sha256': pool_file_obj['sha256'],
-      'index': pool_file_obj['index']
-    })
-
-  # save to CSV
-  file_utils.mkdirs(opt_fp_out)
-  df = pd.DataFrame.from_dict(data)
-  df.to_csv(opt_fp_out, index=False)
-
-  # timing
-  log.info('time: {:.2f}, theads: {}'.format(time.time() - st, opt_threads))
\ No newline at end of file
diff --git a/megapixels/commands/demo/face_analysis.py b/megapixels/commands/demo/face_analysis.py
new file mode 100644
index 00000000..6721a02d
--- /dev/null
+++ b/megapixels/commands/demo/face_analysis.py
@@ -0,0 +1,56 @@
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+@click.command()
+@click.option('--data_store', 'opt_data_store',
+  type=cfg.DataStoreVar,
+  default=click_utils.get_default(types.DataStore.NAS),
+  show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+  type=cfg.DatasetVar,
+  required=True,
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('--index', 'opt_index', type=int, required=True,
+  help='Image record index to look up')
+@click.pass_context
+def cli(ctx, opt_index, opt_data_store, opt_dataset):
+  """Displays image info for a record index"""
+
+  import sys
+  from glob import glob
+  from os.path import join
+  from pathlib import Path
+  import time
+
+  import pandas as pd
+  import cv2 as cv
+  from tqdm import tqdm
+
+  from app.utils import file_utils, im_utils
+
+  log = Logger.getLogger()
+
+  # init dataset; metadata CSVs are loaded on init
+  dataset = Dataset(opt_data_store, opt_dataset)
+  # find image records
+  image_record = dataset.roi_idx_to_record(opt_index)
+  # debug
+  image_record.summarize()
+  # load image
+  fp_im = image_record.filepath
+  im = cv.imread(fp_im)
+  # display
+  cv.imshow('', im)
+  # cv gui
+  while True:
+    k = cv.waitKey(1) & 0xFF
+    if k == 27 or k == ord('q'): # ESC
+      cv.destroyAllWindows()
+      sys.exit()
+    elif k != 255:
+      # any key to continue
+      break
\ No newline at end of file
diff --git a/megapixels/commands/demo/face_search.py b/megapixels/commands/demo/face_search.py
new file mode 100644
index 00000000..08b2323d
--- /dev/null
+++ b/megapixels/commands/demo/face_search.py
@@ -0,0 +1,95 @@
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+  help='Input face image')
+@click.option('--data_store', 'opt_data_store',
+  type=cfg.DataStoreVar,
+  default=click_utils.get_default(types.DataStore.SSD),
+  show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+  type=cfg.DatasetVar,
+  required=True,
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('--gpu', 'opt_gpu', default=0,
+  help='GPU index (use -1 for CPU)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_gpu):
+  """Searches a dataset for faces similar to the query image"""
+
+  import sys
+  from glob import glob
+  from os.path import join
+  from pathlib import Path
+  import time
+
+  import imutils
+  import pandas as pd
+  import cv2 as cv
+  import dlib
+  from tqdm import tqdm
+
+  from app.utils import file_utils, im_utils
+  from app.models.data_store import DataStore, DataStoreS3
+  from app.processors import face_detector
+  from app.processors import face_recognition
+
+  log = Logger.getLogger()
+
+  # init face detection
+  detector = face_detector.DetectorDLIBHOG()
+  # init face recognition/vector extraction
+  recognition = face_recognition.RecognitionDLIB(gpu=opt_gpu)
+
+  # load query image
+  im_query = cv.imread(opt_fp_in)
+  # get detection as BBox object, exit early if no face found
+  bboxes = detector.detect(im_query, largest=True)
+  if not bboxes:
+    log.error('No face detected. Exiting')
+    return
+  bbox = bboxes[0]
+  dim = im_query.shape[:2][::-1]
+  bbox = bbox.to_dim(dim) # convert back to real dimensions
+
+  # extract the face vector
+  vec_query = recognition.vec(im_query, bbox)
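+
+  # find_matches is assumed to rank every record's stored face vector by
+  # distance to vec_query (dlib face descriptors are 128-D) and return the
+  # n_results closest image records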
+
+  # load dataset CSVs
+  dataset = Dataset(opt_data_store, opt_dataset)
+
+  # find matches
+  image_records = dataset.find_matches(vec_query, n_results=5)
+
+  # summary
+  ims_match = [im_query]
+  for image_record in image_records:
+    image_record.summarize()
+    log.info(f'{image_record.filepath}')
+    im_match = cv.imread(image_record.filepath)
+    ims_match.append(im_match)
+
+  montages = imutils.build_montages(ims_match, (256, 256), (3,2))
+
+  for i, montage in enumerate(montages):
+    cv.imshow(f'{i}', montage)
+  # cv gui
+  while True:
+    k = cv.waitKey(1) & 0xFF
+    if k == 27 or k == ord('q'): # ESC
+      cv.destroyAllWindows()
+      sys.exit()
+    elif k != 255:
+      # any key to continue
+      break
\ No newline at end of file
-- 
cgit v1.2.3-70-g09d2