From 88ec48e1c4d93ba9cd3aa186c068ef2aa4c27c56 Mon Sep 17 00:00:00 2001
From: adamhrv
Date: Mon, 17 Dec 2018 01:37:31 +0100
Subject: fixing dataset processors

---
 megapixels/commands/cv/cluster.py      |  22 ++--
 megapixels/commands/cv/face_pose.py    | 140 +++++++++++++++++++++++++++
 megapixels/commands/cv/face_roi.py     | 171 ++++++++++++++++++++++++++++++++
 megapixels/commands/cv/face_vector.py  | 125 ++++++++++++++++++++++++
 megapixels/commands/cv/gen_face_vec.py | 123 -----------------------
 megapixels/commands/cv/gen_pose.py     | 141 ---------------------------
 megapixels/commands/cv/gen_rois.py     | 172 ---------------------------------
 7 files changed, 447 insertions(+), 447 deletions(-)
 create mode 100644 megapixels/commands/cv/face_pose.py
 create mode 100644 megapixels/commands/cv/face_roi.py
 create mode 100644 megapixels/commands/cv/face_vector.py
 delete mode 100644 megapixels/commands/cv/gen_face_vec.py
 delete mode 100644 megapixels/commands/cv/gen_pose.py
 delete mode 100644 megapixels/commands/cv/gen_rois.py

(limited to 'megapixels/commands/cv')

diff --git a/megapixels/commands/cv/cluster.py b/megapixels/commands/cv/cluster.py
index 94334133..419091a0 100644
--- a/megapixels/commands/cv/cluster.py
+++ b/megapixels/commands/cv/cluster.py
@@ -23,20 +23,20 @@ from app.utils.logger_utils import Logger
 @click.pass_context
 def cli(ctx, opt_data_store, opt_dataset, opt_metadata):
   """Display image info"""
-
-  # cluster the embeddings
-print("[INFO] clustering...")
-clt = DBSCAN(metric="euclidean", n_jobs=args["jobs"])
-clt.fit(encodings)
-
-# determine the total number of unique faces found in the dataset
-labelIDs = np.unique(clt.labels_)
-numUniqueFaces = len(np.where(labelIDs > -1)[0])
-print("[INFO] # unique faces: {}".format(numUniqueFaces))
+
+  # cluster the embeddings
+  print("[INFO] clustering...")
+  clt = DBSCAN(metric="euclidean", n_jobs=args["jobs"])
+  clt.fit(encodings)
+
+  # determine the total number of unique faces found in the dataset
+  labelIDs = np.unique(clt.labels_)
+  numUniqueFaces = len(np.where(labelIDs > -1)[0])
+  print("[INFO] # unique faces: {}".format(numUniqueFaces))
   # load and display image
   im = cv.imread(fp_im)
   cv.imshow('', im)
-
+
   while True:
     k = cv.waitKey(1) & 0xFF
     if k == 27 or k == ord('q'): # ESC
diff --git a/megapixels/commands/cv/face_pose.py b/megapixels/commands/cv/face_pose.py
new file mode 100644
index 00000000..e7ffb7ac
--- /dev/null
+++ b/megapixels/commands/cv/face_pose.py
@@ -0,0 +1,140 @@
+"""
+Converts ROIs to pose: yaw, roll, pitch
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+  help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+  help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+  help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+  type=cfg.DataStoreVar,
+  default=click_utils.get_default(types.DataStore.SSD),
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+  type=cfg.DatasetVar,
+  required=True,
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+  type=(int, int), default=(300, 300),
+  help='Output image size')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+  help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+  help='Force overwrite file')
+@click.option('-d', '--display', 'opt_display', is_flag=True,
+  help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
+  opt_slice, opt_force, opt_display):
+  """Converts ROIs to pose: roll, yaw, pitch"""
+
+  import sys
+  import os
+  from os.path import join
+  from pathlib import Path
+  from glob import glob
+
+  from tqdm import tqdm
+  import numpy as np
+  import dlib # must keep a local reference for dlib
+  import cv2 as cv
+  import pandas as pd
+
+  from app.models.bbox import BBox
+  from app.utils import logger_utils, file_utils, im_utils
+  from app.processors.face_landmarks import LandmarksDLIB
+  from app.processors.face_pose import FacePoseDLIB
+  from app.models.data_store import DataStore
+
+  # -------------------------------------------------
+  # init here
+
+  log = logger_utils.Logger.getLogger()
+
+  # set data_store
+  data_store = DataStore(opt_data_store, opt_dataset)
+
+  # get filepath out
+  fp_out = data_store.metadata(types.Metadata.FACE_POSE) if opt_fp_out is None else opt_fp_out
+  if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwite')
+    return
+
+  # init face processors
+  face_pose = FacePoseDLIB()
+  face_landmarks = LandmarksDLIB()
+
+  # load filepath data
+  fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+  df_record = pd.read_csv(fp_record).set_index('index')
+  # load ROI data
+  fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+  df_roi = pd.read_csv(fp_roi).set_index('index')
+  # slice if you want
+  if opt_slice:
+    df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+  # group by image index (speedup if multiple faces per image)
+  df_img_groups = df_roi.groupby('record_index')
+  log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+  # store poses and convert to DataFrame
+  poses = []
+
+  # iterate
+  for record_index, df_img_group in tqdm(df_img_groups):
+    # make fp
+    ds_record = df_record.iloc[record_index]
+    fp_im = data_store.face_image(ds_record.subdir, ds_record.fn, ds_record.ext)
+    im = cv.imread(fp_im)
+    # get bbox
+    x = df_img_group.x.values[0]
+    y = df_img_group.y.values[0]
+    w = df_img_group.w.values[0]
+    h = df_img_group.h.values[0]
+    dim = im.shape[:2][::-1]
+    bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+    # get pose
+    landmarks = face_landmarks.landmarks(im, bbox)
+    pose_data = face_pose.pose(landmarks, dim, project_points=opt_display)
+    pose_degrees = pose_data['degrees'] # only keep the degrees data
+
+    # use the project point data if display flag set
+    if opt_display:
+      pts_im = pose_data['points_image']
+      pts_model = pose_data['points_model']
+      pt_nose = pose_data['point_nose']
+      dst = im.copy()
+      face_pose.draw_pose(dst, pts_im, pts_model, pt_nose)
+      face_pose.draw_degrees(dst, pose_degrees)
+      # display to cv window
+      cv.imshow('', dst)
+      while True:
+        k = cv.waitKey(1) & 0xFF
+        if k == 27 or k == ord('q'): # ESC
+          cv.destroyAllWindows()
+          sys.exit()
+        elif k != 255:
+          # any key to continue
+          break
+
+    # add image index and append to result CSV data
+    pose_degrees['record_index'] = record_index
+    poses.append(pose_degrees)
+
+
+  # save date
+  file_utils.mkdirs(fp_out)
+  df = pd.DataFrame.from_dict(poses)
+  df.index.name = 'index'
+  df.to_csv(fp_out)
\ No newline at end of file
diff --git a/megapixels/commands/cv/face_roi.py b/megapixels/commands/cv/face_roi.py
new file mode 100644
index 00000000..d7248aee
--- /dev/null
+++ b/megapixels/commands/cv/face_roi.py
@@ -0,0 +1,171 @@
+"""
+Crop images to prepare for training
+"""
+
+import click
+# from PIL import Image, ImageOps, ImageFilter, ImageDraw
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+color_filters = {'color': 1, 'gray': 2, 'all': 3}
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+  help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+  help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+  help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+  type=cfg.DataStoreVar,
+  default=click_utils.get_default(types.DataStore.SSD),
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+  type=cfg.DatasetVar,
+  required=True,
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+  type=(int, int), default=(300, 300),
+  help='Output image size')
+@click.option('-t', '--detector-type', 'opt_detector_type',
+  type=cfg.FaceDetectNetVar,
+  default=click_utils.get_default(types.FaceDetectNet.DLIB_CNN),
+  help=click_utils.show_help(types.FaceDetectNet))
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+  help='GPU index')
+@click.option('--conf', 'opt_conf_thresh', default=0.85, type=click.FloatRange(0,1),
+  help='Confidence minimum threshold')
+@click.option('-p', '--pyramids', 'opt_pyramids', default=0, type=click.IntRange(0,4),
+  help='Number pyramids to upscale for DLIB detectors')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+  help='Slice list of files')
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+  help='Display detections to debug')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+  help='Force overwrite file')
+@click.option('--color', 'opt_color_filter',
+  type=click.Choice(color_filters.keys()), default='all',
+  help='Filter to keep color or grayscale images (color = keep color')
+@click.option('--largest/--all-faces', 'opt_largest', is_flag=True, default=True,
+  help='Only keep largest face')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset, opt_size, opt_detector_type,
+  opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_force, opt_color_filter,
+  opt_largest):
+  """Converts frames with faces to CSV of ROIs"""
+
+  import sys
+  import os
+  from os.path import join
+  from pathlib import Path
+  from glob import glob
+
+  from tqdm import tqdm
+  import numpy as np
+  import dlib # must keep a local reference for dlib
+  import cv2 as cv
+  import pandas as pd
+
+  from app.utils import logger_utils, file_utils, im_utils
+  from app.processors import face_detector
+  from app.models.data_store import DataStore
+
+  # -------------------------------------------------
+  # init here
+
+  log = logger_utils.Logger.getLogger()
+
+  # set data_store
+  data_store = DataStore(opt_data_store, opt_dataset)
+
+  # get filepath out
+  fp_out = data_store.metadata(types.Metadata.FACE_ROI) if opt_fp_out is None else opt_fp_out
+  if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwite')
+    return
+
+  # set detector
+  if opt_detector_type == types.FaceDetectNet.CVDNN:
+    detector = face_detector.DetectorCVDNN()
+  elif opt_detector_type == types.FaceDetectNet.DLIB_CNN:
+    detector = face_detector.DetectorDLIBCNN(opt_gpu)
+  elif opt_detector_type == types.FaceDetectNet.DLIB_HOG:
+    detector = face_detector.DetectorDLIBHOG()
+  elif opt_detector_type == types.FaceDetectNet.MTCNN:
+    detector = face_detector.DetectorMTCNN()
+  elif opt_detector_type == types.FaceDetectNet.HAAR:
+    log.error('{} not yet implemented'.format(opt_detector_type.name))
+    return
+
+
+  # get list of files to process
+  fp_in = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
+  df_records = pd.read_csv(fp_in).set_index('index')
+  if opt_slice:
+    df_records = df_records[opt_slice[0]:opt_slice[1]]
+  log.debug('processing {:,} files'.format(len(df_records)))
+
+  # filter out grayscale
+  color_filter = color_filters[opt_color_filter]
+
+  data = []
+
+  for df_record in tqdm(df_records.itertuples(), total=len(df_records)):
+    fp_im = data_store.face_image(str(df_record.subdir), str(df_record.fn), str(df_record.ext))
+    im = cv.imread(fp_im)
+
+    # filter out color or grayscale iamges
+    if color_filter != color_filters['all']:
+      try:
+        is_gray = im_utils.is_grayscale(im)
+        if is_gray and color_filter != color_filters['gray']:
+          log.debug('Skipping grayscale image: {}'.format(fp_im))
+          continue
+      except Exception as e:
+        log.error('Could not check grayscale: {}'.format(fp_im))
+        continue
+
+    try:
+      bboxes = detector.detect(im, size=opt_size, pyramids=opt_pyramids, largest=opt_largest)
+    except Exception as e:
+      log.error('could not detect: {}'.format(fp_im))
+      log.error('{}'.format(e))
+      continue
+
+    for bbox in bboxes:
+      roi = {
+        'record_index': int(df_record.Index),
+        'x': bbox.x,
+        'y': bbox.y,
+        'w': bbox.w,
+        'h': bbox.h,
+        'image_width': im.shape[1],
+        'image_height': im.shape[0]}
+      data.append(roi)
+
+    # debug display
+    if opt_display and len(bboxes):
+      bbox_dim = bbox.to_dim(im.shape[:2][::-1]) # w,h
+      im_md = im_utils.resize(im, width=min(1200, opt_size[0]))
+      for bbox in bboxes:
+        bbox_dim = bbox.to_dim(im_md.shape[:2][::-1])
+        cv.rectangle(im_md, bbox_dim.pt_tl, bbox_dim.pt_br, (0,255,0), 3)
+      cv.imshow('', im_md)
+      while True:
+        k = cv.waitKey(1) & 0xFF
+        if k == 27 or k == ord('q'): # ESC
+          cv.destroyAllWindows()
+          sys.exit()
+        elif k != 255:
+          # any key to continue
+          break
+
+  # save date
+  file_utils.mkdirs(fp_out)
+  df = pd.DataFrame.from_dict(data)
+  df.index.name = 'index'
+  df.to_csv(fp_out)
\ No newline at end of file
diff --git a/megapixels/commands/cv/face_vector.py b/megapixels/commands/cv/face_vector.py
new file mode 100644
index 00000000..203f73eb
--- /dev/null
+++ b/megapixels/commands/cv/face_vector.py
@@ -0,0 +1,125 @@
+"""
+Converts ROIs to face vector
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+  help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+  help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+  type=cfg.DataStoreVar,
+  default=click_utils.get_default(types.DataStore.SSD),
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+  type=cfg.DatasetVar,
+  required=True,
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+  type=(int, int), default=(300, 300),
+  help='Output image size')
+@click.option('-j', '--jitters', 'opt_jitters', default=cfg.DLIB_FACEREC_JITTERS,
+  help='Number of jitters')
+@click.option('-p', '--padding', 'opt_padding', default=cfg.DLIB_FACEREC_PADDING,
+  help='Percentage padding')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+  help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+  help='Force overwrite file')
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+  help='GPU index')
+@click.pass_context
+def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
+  opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding):
+  """Converts face ROIs to vectors"""
+
+  import sys
+  import os
+  from os.path import join
+  from pathlib import Path
+  from glob import glob
+
+  from tqdm import tqdm
+  import numpy as np
+  import dlib # must keep a local reference for dlib
+  import cv2 as cv
+  import pandas as pd
+
+  from app.models.bbox import BBox
+  from app.models.data_store import DataStore
+  from app.utils import logger_utils, file_utils, im_utils
+  from app.processors import face_recognition
+
+
+  # -------------------------------------------------
+  # init here
+
+  log = logger_utils.Logger.getLogger()
+  # set data_store
+  data_store = DataStore(opt_data_store, opt_dataset)
+
+  # get filepath out
+  fp_out = data_store.metadata(types.Metadata.FACE_VECTOR) if opt_fp_out is None else opt_fp_out
+  if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwite')
+    return
+
+  # init face processors
+  facerec = face_recognition.RecognitionDLIB()
+
+  # load data
+  fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+  df_record = pd.read_csv(fp_record).set_index('index')
+  fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+  df_roi = pd.read_csv(fp_roi).set_index('index')
+
+  if opt_slice:
+    df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+
+  # -------------------------------------------------
+  # process here
+  df_img_groups = df_roi.groupby('record_index')
+  log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+  vecs = []
+
+  for image_index, df_img_group in tqdm(df_img_groups):
+    # make fp
+    roi_index = df_img_group.index.values[0]
+    # log.debug(f'roi_index: {roi_index}, image_index: {image_index}')
+    ds_file = df_record.loc[roi_index] # locate image meta
+    #ds_file = df_record.loc['index', image_index] # locate image meta
+
+    fp_im = data_store.face_image(str(ds_file.subdir), str(ds_file.fn), str(ds_file.ext))
+    im = cv.imread(fp_im)
+    # get bbox
+    x = df_img_group.x.values[0]
+    y = df_img_group.y.values[0]
+    w = df_img_group.w.values[0]
+    h = df_img_group.h.values[0]
+    imw = df_img_group.image_width.values[0]
+    imh = df_img_group.image_height.values[0]
+    dim = im.shape[:2][::-1]
+    # get face vector
+    dim = (imw, imh)
+    bbox_dim = BBox.from_xywh(x, y, w, h).to_dim(dim) # convert to int real dimensions
+    # compute vec
+    # padding=opt_padding not yet implemented in 19.16 but merged in master
+    vec = facerec.vec(im, bbox_dim, jitters=opt_jitters)
+    vec_str = ','.join([repr(x) for x in vec]) # convert to string for CSV
+    vecs.append( {'roi_index': roi_index, 'image_index': image_index, 'vec': vec_str})
+
+
+  # save date
+  df = pd.DataFrame.from_dict(vecs)
+  df.index.name = 'index'
+  file_utils.mkdirs(fp_out)
+  df.to_csv(fp_out)
\ No newline at end of file
diff --git a/megapixels/commands/cv/gen_face_vec.py b/megapixels/commands/cv/gen_face_vec.py
deleted file mode 100644
index 83e1460d..00000000
--- a/megapixels/commands/cv/gen_face_vec.py
+++ /dev/null
@@ -1,123 +0,0 @@
-"""
-Converts ROIs to face vector
-"""
-
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-
-@click.command()
-@click.option('-o', '--output', 'opt_fp_out', default=None,
-  help='Override enum output filename CSV')
-@click.option('-m', '--media', 'opt_dir_media', default=None,
-  help='Override enum media directory')
-@click.option('--data_store', 'opt_data_store',
-  type=cfg.DataStoreVar,
-  default=click_utils.get_default(types.DataStore.SSD),
-  show_default=True,
-  help=click_utils.show_help(types.Dataset))
-@click.option('--dataset', 'opt_dataset',
-  type=cfg.DatasetVar,
-  required=True,
-  show_default=True,
-  help=click_utils.show_help(types.Dataset))
-@click.option('--size', 'opt_size',
-  type=(int, int), default=(300, 300),
-  help='Output image size')
-@click.option('-j', '--jitters', 'opt_jitters', default=cfg.DLIB_FACEREC_JITTERS,
-  help='Number of jitters')
-@click.option('-p', '--padding', 'opt_padding', default=cfg.DLIB_FACEREC_PADDING,
-  help='Percentage padding')
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
-  help='Slice list of files')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
-  help='Force overwrite file')
-@click.option('-g', '--gpu', 'opt_gpu', default=0,
-  help='GPU index')
-@click.pass_context
-def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
-  opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding):
-  """Converts face ROIs to vectors"""
-
-  import sys
-  import os
-  from os.path import join
-  from pathlib import Path
-  from glob import glob
-
-  from tqdm import tqdm
-  import numpy as np
-  import dlib # must keep a local reference for dlib
-  import cv2 as cv
-  import pandas as pd
-
-  from app.models.bbox import BBox
-  from app.models.data_store import DataStore
-  from app.utils import logger_utils, file_utils, im_utils
-  from app.processors import face_recognition
-
-
-  # -------------------------------------------------
-  # init here
-
-  log = logger_utils.Logger.getLogger()
-  # set data_store
-  data_store = DataStore(opt_data_store, opt_dataset)
-
-  # get filepath out
-  fp_out = data_store.metadata(types.Metadata.FACE_VECTOR) if opt_fp_out is None else opt_fp_out
-  if not opt_force and Path(fp_out).exists():
-    log.error('File exists. Use "-f / --force" to overwite')
-    return
-
-  # init face processors
-  facerec = face_recognition.RecognitionDLIB()
-
-  # load data
-  df_file = pd.read_csv(data_store.metadata(types.Metadata.FILEPATH)).set_index('index')
-  df_roi = pd.read_csv(data_store.metadata(types.Metadata.FACE_ROI)).set_index('index')
-
-  if opt_slice:
-    df_roi = df_roi[opt_slice[0]:opt_slice[1]]
-
-  # -------------------------------------------------
-  # process here
-  df_img_groups = df_roi.groupby('image_index')
-  log.debug('processing {:,} groups'.format(len(df_img_groups)))
-
-  vecs = []
-
-  for image_index, df_img_group in tqdm(df_img_groups):
-    # make fp
-    roi_index = df_img_group.index.values[0]
-    log.debug(f'roi_index: {roi_index}, image_index: {image_index}')
-    ds_file = df_file.loc[roi_index] # locate image meta
-    #ds_file = df_file.loc['index', image_index] # locate image meta
-
-    fp_im = data_store.face_image(str(ds_file.subdir), str(ds_file.fn), str(ds_file.ext))
-    im = cv.imread(fp_im)
-    # get bbox
-    x = df_img_group.x.values[0]
-    y = df_img_group.y.values[0]
-    w = df_img_group.w.values[0]
-    h = df_img_group.h.values[0]
-    imw = df_img_group.image_width.values[0]
-    imh = df_img_group.image_height.values[0]
-    dim = im.shape[:2][::-1]
-    # get face vector
-    dim = (imw, imh)
-    bbox_dim = BBox.from_xywh(x, y, w, h).to_dim(dim) # convert to int real dimensions
-    # compute vec
-    # padding=opt_padding not yet implemented in 19.16 but merged in master
-    vec = facerec.vec(im, bbox_dim, jitters=opt_jitters)
-    vec_str = ','.join([repr(x) for x in vec]) # convert to string for CSV
-    vecs.append( {'roi_index': roi_index, 'image_index': image_index, 'vec': vec_str})
-
-
-  # save date
-  df = pd.DataFrame.from_dict(vecs)
-  df.index.name = 'index'
-  #file_utils.mkdirs(fp_out)
-  #df.to_csv(fp_out)
\ No newline at end of file
diff --git a/megapixels/commands/cv/gen_pose.py b/megapixels/commands/cv/gen_pose.py
deleted file mode 100644
index aefadb00..00000000
--- a/megapixels/commands/cv/gen_pose.py
+++ /dev/null
@@ -1,141 +0,0 @@
-"""
-Converts ROIs to pose: yaw, roll, pitch
-"""
-
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', default=None,
-  help='Override enum input filename CSV')
-@click.option('-o', '--output', 'opt_fp_out', default=None,
-  help='Override enum output filename CSV')
-@click.option('-m', '--media', 'opt_dir_media', default=None,
-  help='Override enum media directory')
-@click.option('--data_store', 'opt_data_store',
-  type=cfg.DataStoreVar,
-  default=click_utils.get_default(types.DataStore.SSD),
-  show_default=True,
-  help=click_utils.show_help(types.Dataset))
-@click.option('--dataset', 'opt_dataset',
-  type=cfg.DatasetVar,
-  required=True,
-  show_default=True,
-  help=click_utils.show_help(types.Dataset))
-@click.option('--size', 'opt_size',
-  type=(int, int), default=(300, 300),
-  help='Output image size')
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
-  help='Slice list of files')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
-  help='Force overwrite file')
-@click.option('-d', '--display', 'opt_display', is_flag=True,
-  help='Display image for debugging')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
-  opt_slice, opt_force, opt_display):
-  """Converts ROIs to pose: roll, yaw, pitch"""
-
-  import sys
-  import os
-  from os.path import join
-  from pathlib import Path
-  from glob import glob
-
-  from tqdm import tqdm
-  import numpy as np
-  import dlib # must keep a local reference for dlib
-  import cv2 as cv
-  import pandas as pd
-
-  from app.models.bbox import BBox
-  from app.utils import logger_utils, file_utils, im_utils
-  from app.processors.face_landmarks import LandmarksDLIB
-  from app.processors.face_pose import FacePoseDLIB
-  from app.models.data_store import DataStore
-
-  # -------------------------------------------------
-  # init here
-
-  log = logger_utils.Logger.getLogger()
-
-  # set data_store
-  data_store = DataStore(opt_data_store, opt_dataset)
-
-  # get filepath out
-  fp_out = data_store.metadata(types.Metadata.FACE_POSE) if opt_fp_out is None else opt_fp_out
-  if not opt_force and Path(fp_out).exists():
-    log.error('File exists. Use "-f / --force" to overwite')
-    return
-
-  # init face processors
-  face_pose = FacePoseDLIB()
-  face_landmarks = LandmarksDLIB()
-
-  # load filepath data
-  fp_filepath = data_store.metadata(types.Metadata.FILEPATH)
-  df_filepath = pd.read_csv(fp_filepath)
-  # load ROI data
-  fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
-  df_roi = pd.read_csv(fp_roi)
-  # slice if you want
-  if opt_slice:
-    df_roi = df_roi[opt_slice[0]:opt_slice[1]]
-  # group by image index (speedup if multiple faces per image)
-  df_img_groups = df_roi.groupby('image_index')
-  log.debug('processing {:,} groups'.format(len(df_img_groups)))
-
-  # store poses and convert to DataFrame
-  poses = []
-
-  # iterate
-  for image_index, df_img_group in tqdm(df_img_groups):
-    # make fp
-    ds_file = df_filepath.iloc[image_index]
-    fp_im = data_store.face_image(ds_file.subdir, ds_file.fn, ds_file.ext)
-    #fp_im = join(opt_dir_media, ds_file.subdir, '{}.{}'.format(ds_file.fn, ds_file.ext))
-    im = cv.imread(fp_im)
-    # get bbox
-    x = df_img_group.x.values[0]
-    y = df_img_group.y.values[0]
-    w = df_img_group.w.values[0]
-    h = df_img_group.h.values[0]
-    dim = im.shape[:2][::-1]
-    bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
-    # get pose
-    landmarks = face_landmarks.landmarks(im, bbox)
-    pose_data = face_pose.pose(landmarks, dim, project_points=opt_display)
-    pose_degrees = pose_data['degrees'] # only keep the degrees data
-
-    # use the project point data if display flag set
-    if opt_display:
-      pts_im = pose_data['points_image']
-      pts_model = pose_data['points_model']
-      pt_nose = pose_data['point_nose']
-      dst = im.copy()
-      face_pose.draw_pose(dst, pts_im, pts_model, pt_nose)
-      face_pose.draw_degrees(dst, pose_degrees)
-      # display to cv window
-      cv.imshow('', dst)
-      while True:
-        k = cv.waitKey(1) & 0xFF
-        if k == 27 or k == ord('q'): # ESC
-          cv.destroyAllWindows()
-          sys.exit()
-        elif k != 255:
-          # any key to continue
-          break
-
-    # add image index and append to result CSV data
-    pose_degrees['image_index'] = image_index
-    poses.append(pose_degrees)
-
-
-  # save date
-  file_utils.mkdirs(fp_out)
-  df = pd.DataFrame.from_dict(poses)
-  df.index.name = 'index'
-  df.to_csv(fp_out)
\ No newline at end of file
diff --git a/megapixels/commands/cv/gen_rois.py b/megapixels/commands/cv/gen_rois.py
deleted file mode 100644
index 20dd598a..00000000
--- a/megapixels/commands/cv/gen_rois.py
+++ /dev/null
@@ -1,172 +0,0 @@
-"""
-Crop images to prepare for training
-"""
-
-import click
-# from PIL import Image, ImageOps, ImageFilter, ImageDraw
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-
-color_filters = {'color': 1, 'gray': 2, 'all': 3}
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', default=None,
-  help='Override enum input filename CSV')
-@click.option('-o', '--output', 'opt_fp_out', default=None,
-  help='Override enum output filename CSV')
-@click.option('-m', '--media', 'opt_dir_media', default=None,
-  help='Override enum media directory')
-@click.option('--data_store', 'opt_data_store',
-  type=cfg.DataStoreVar,
-  default=click_utils.get_default(types.DataStore.SSD),
-  show_default=True,
-  help=click_utils.show_help(types.Dataset))
-@click.option('--dataset', 'opt_dataset',
-  type=cfg.DatasetVar,
-  required=True,
-  show_default=True,
-  help=click_utils.show_help(types.Dataset))
-@click.option('--size', 'opt_size',
-  type=(int, int), default=(300, 300),
-  help='Output image size')
-@click.option('-t', '--detector-type', 'opt_detector_type',
-  type=cfg.FaceDetectNetVar,
-  default=click_utils.get_default(types.FaceDetectNet.DLIB_CNN),
-  help=click_utils.show_help(types.FaceDetectNet))
-@click.option('-g', '--gpu', 'opt_gpu', default=0,
-  help='GPU index')
-@click.option('--conf', 'opt_conf_thresh', default=0.85, type=click.FloatRange(0,1),
-  help='Confidence minimum threshold')
-@click.option('-p', '--pyramids', 'opt_pyramids', default=0, type=click.IntRange(0,4),
-  help='Number pyramids to upscale for DLIB detectors')
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
-  help='Slice list of files')
-@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
-  help='Display detections to debug')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
-  help='Force overwrite file')
-@click.option('--color', 'opt_color_filter',
-  type=click.Choice(color_filters.keys()), default='all',
-  help='Filter to keep color or grayscale images (color = keep color')
-@click.option('--largest/--all-faces', 'opt_largest', is_flag=True, default=True,
-  help='Only keep largest face')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset, opt_size, opt_detector_type,
-  opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_force, opt_color_filter,
-  opt_largest):
-  """Converts frames with faces to CSV of ROIs"""
-
-  import sys
-  import os
-  from os.path import join
-  from pathlib import Path
-  from glob import glob
-
-  from tqdm import tqdm
-  import numpy as np
-  import dlib # must keep a local reference for dlib
-  import cv2 as cv
-  import pandas as pd
-
-  from app.utils import logger_utils, file_utils, im_utils
-  from app.processors import face_detector
-  from app.models.data_store import DataStore
-
-  # -------------------------------------------------
-  # init here
-
-  log = logger_utils.Logger.getLogger()
-
-  # set data_store
-  data_store = DataStore(opt_data_store, opt_dataset)
-
-  # get filepath out
-  fp_out = data_store.metadata(types.Metadata.FACE_ROI) if opt_fp_out is None else opt_fp_out
-  if not opt_force and Path(fp_out).exists():
-    log.error('File exists. Use "-f / --force" to overwite')
-    return
-
-  # set detector
-  if opt_detector_type == types.FaceDetectNet.CVDNN:
-    detector = face_detector.DetectorCVDNN()
-  elif opt_detector_type == types.FaceDetectNet.DLIB_CNN:
-    detector = face_detector.DetectorDLIBCNN(opt_gpu)
-  elif opt_detector_type == types.FaceDetectNet.DLIB_HOG:
-    detector = face_detector.DetectorDLIBHOG()
-  elif opt_detector_type == types.FaceDetectNet.MTCNN:
-    detector = face_detector.DetectorMTCNN()
-  elif opt_detector_type == types.FaceDetectNet.HAAR:
-    log.error('{} not yet implemented'.format(opt_detector_type.name))
-    return
-
-
-  # get list of files to process
-  fp_in = data_store.metadata(types.Metadata.FILEPATH) if opt_fp_in is None else opt_fp_in
-  df_files = pd.read_csv(fp_in).set_index('index')
-  if opt_slice:
-    df_files = df_files[opt_slice[0]:opt_slice[1]]
-  log.debug('processing {:,} files'.format(len(df_files)))
-
-  # filter out grayscale
-  color_filter = color_filters[opt_color_filter]
-
-  data = []
-
-  for df_file in tqdm(df_files.itertuples(), total=len(df_files)):
-    fp_im = data_store.face_image(str(df_file.subdir), str(df_file.fn), str(df_file.ext))
-    #fp_im = join(opt_dir_media, str(df_file.subdir), f'{df_file.fn}.{df_file.ext}')
-    im = cv.imread(fp_im)
-
-    # filter out color or grayscale iamges
-    if color_filter != color_filters['all']:
-      try:
-        is_gray = im_utils.is_grayscale(im)
-        if is_gray and color_filter != color_filters['gray']:
-          log.debug('Skipping grayscale image: {}'.format(fp_im))
-          continue
-      except Exception as e:
-        log.error('Could not check grayscale: {}'.format(fp_im))
-        continue
-
-    try:
-      bboxes = detector.detect(im, size=opt_size, pyramids=opt_pyramids, largest=opt_largest)
-    except Exception as e:
-      log.error('could not detect: {}'.format(fp_im))
-      log.error('{}'.format(e))
-      continue
-
-    for bbox in bboxes:
-      roi = {
-        'image_index': int(df_file.Index),
-        'x': bbox.x,
-        'y': bbox.y,
-        'w': bbox.w,
-        'h': bbox.h,
-        'image_width': im.shape[1],
-        'image_height': im.shape[0]}
-      data.append(roi)
-
-    # debug display
-    if opt_display and len(bboxes):
-      bbox_dim = bbox.to_dim(im.shape[:2][::-1]) # w,h
-      im_md = im_utils.resize(im, width=min(1200, opt_size[0]))
-      for bbox in bboxes:
-        bbox_dim = bbox.to_dim(im_md.shape[:2][::-1])
-        cv.rectangle(im_md, bbox_dim.pt_tl, bbox_dim.pt_br, (0,255,0), 3)
-      cv.imshow('', im_md)
-      while True:
-        k = cv.waitKey(1) & 0xFF
-        if k == 27 or k == ord('q'): # ESC
-          cv.destroyAllWindows()
-          sys.exit()
-        elif k != 255:
-          # any key to continue
-          break
-
-  # save date
-  file_utils.mkdirs(fp_out)
-  df = pd.DataFrame.from_dict(data)
-  df.index.name = 'index'
-  df.to_csv(opt_fp_out)
\ No newline at end of file
-- 
cgit v1.2.3-70-g09d2