summaryrefslogtreecommitdiff
path: root/megapixels/commands/cv/face_roi.py
diff options
context:
space:
mode:
author adamhrv <adam@ahprojects.com> 2019-01-18 11:00:18 +0100
committer adamhrv <adam@ahprojects.com> 2019-01-18 11:00:18 +0100
commit e06af50389f849be0bfe4fa97d39f4519ef2c711 (patch)
tree 49755b51e1b8b1f8031e5483333570a8e9951272 /megapixels/commands/cv/face_roi.py
parent 03ad11fb2a3dcd425d50167b15d72d4e0ef536a2 (diff)
change to cli_proc
Diffstat (limited to 'megapixels/commands/cv/face_roi.py')
-rw-r--r-- megapixels/commands/cv/face_roi.py | 187
1 file changed, 0 insertions, 187 deletions
diff --git a/megapixels/commands/cv/face_roi.py b/megapixels/commands/cv/face_roi.py
deleted file mode 100644
index e83b0f61..00000000
--- a/megapixels/commands/cv/face_roi.py
+++ /dev/null
@@ -1,187 +0,0 @@
"""
Crop images to prepare for training
"""

import click
# from PIL import Image, ImageOps, ImageFilter, ImageDraw

from app.settings import types
from app.utils import click_utils
from app.settings import app_cfg as cfg

# Maps the --color CLI choice onto an internal code used by the filter logic below
color_filters = {'color': 1, 'gray': 2, 'all': 3}

@click.command()
@click.option('-i', '--input', 'opt_fp_in', default=None,
  help='Override enum input filename CSV')
@click.option('-o', '--output', 'opt_fp_out', default=None,
  help='Override enum output filename CSV')
@click.option('-m', '--media', 'opt_dir_media', default=None,
  help='Override enum media directory')
@click.option('--store', 'opt_data_store',
  type=cfg.DataStoreVar,
  default=click_utils.get_default(types.DataStore.HDD),
  show_default=True,
  help=click_utils.show_help(types.Dataset))
@click.option('--dataset', 'opt_dataset',
  type=cfg.DatasetVar,
  required=True,
  show_default=True,
  help=click_utils.show_help(types.Dataset))
@click.option('--size', 'opt_size',
  type=(int, int), default=(480, 480),
  help='Output image size')
@click.option('-d', '--detector', 'opt_detector_type',
  type=cfg.FaceDetectNetVar,
  default=click_utils.get_default(types.FaceDetectNet.CVDNN),
  help=click_utils.show_help(types.FaceDetectNet))
@click.option('-g', '--gpu', 'opt_gpu', default=0,
  help='GPU index')
@click.option('--conf', 'opt_conf_thresh', default=0.85, type=click.FloatRange(0,1),
  help='Confidence minimum threshold')
@click.option('-p', '--pyramids', 'opt_pyramids', default=0, type=click.IntRange(0,4),
  help='Number pyramids to upscale for DLIB detectors')
@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
  help='Slice list of files')
@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
  help='Display detections to debug')
@click.option('-f', '--force', 'opt_force', is_flag=True,
  help='Force overwrite file')
@click.option('--color', 'opt_color_filter',
  type=click.Choice(color_filters.keys()), default='all',
  help='Filter to keep color or grayscale images (color = keep color)')
@click.option('--keep', 'opt_largest', type=click.Choice(['largest', 'all']), default='all',
  help='Only keep largest face')
@click.option('--zone', 'opt_zone', default=(0.0, 0.0), type=(float, float),
  help='Face center must be located within zone region (0.5 = half width/height)')
@click.pass_context
def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset, opt_size, opt_detector_type,
  opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_force, opt_color_filter,
  opt_largest, opt_zone):
  """Converts frames with faces to CSV of ROIs

  Reads the dataset's file-record CSV, runs the selected face detector on each
  frame, and writes one row per detected face (normalized x/y/w/h keyed by the
  source record index) to the FACE_ROI metadata CSV. The exact command line is
  also saved alongside the CSV for reproducibility.
  """

  # Heavy imports are deferred into the command body so `--help` stays fast
  import sys
  import os
  from os.path import join
  from pathlib import Path
  from glob import glob

  from tqdm import tqdm
  import numpy as np
  import dlib  # must keep a local reference for dlib
  import cv2 as cv
  import pandas as pd

  from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
  from app.processors import face_detector
  from app.models.data_store import DataStore

  # -------------------------------------------------
  # init here

  log = logger_utils.Logger.getLogger()

  # set data_store
  data_store = DataStore(opt_data_store, opt_dataset)

  # resolve output filepath; refuse to clobber an existing CSV unless --force
  fp_out = data_store.metadata(types.Metadata.FACE_ROI) if opt_fp_out is None else opt_fp_out
  if not opt_force and Path(fp_out).exists():
    # FIX: typo "overwite" -> "overwrite"
    log.error('File exists. Use "-f / --force" to overwrite')
    return

  # instantiate the requested detector backend
  if opt_detector_type == types.FaceDetectNet.CVDNN:
    detector = face_detector.DetectorCVDNN()
  elif opt_detector_type == types.FaceDetectNet.DLIB_CNN:
    detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu)
  elif opt_detector_type == types.FaceDetectNet.DLIB_HOG:
    detector = face_detector.DetectorDLIBHOG()
  elif opt_detector_type == types.FaceDetectNet.MTCNN_TF:
    detector = face_detector.DetectorMTCNN_TF(gpu=opt_gpu)
  elif opt_detector_type == types.FaceDetectNet.HAAR:
    log.error('{} not yet implemented'.format(opt_detector_type.name))
    return

  # get list of files to process (optionally sliced for partial runs)
  fp_record = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
  df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
  if opt_slice:
    df_record = df_record[opt_slice[0]:opt_slice[1]]
  log.debug('processing {:,} files'.format(len(df_record)))

  # resolve color filter code
  color_filter = color_filters[opt_color_filter]
  # True when only the largest face per image should be kept
  opt_largest = (opt_largest == 'largest')

  data = []             # accumulated ROI rows
  skipped_files = []    # unreadable frames or frames with no detections
  processed_files = []  # frames that produced at least one detection

  # FIX: loop variable renamed from 'df_record', which shadowed the DataFrame
  for record in tqdm(df_record.itertuples(), total=len(df_record)):
    fp_im = data_store.face(str(record.subdir), str(record.fn), str(record.ext))
    try:
      im = cv.imread(fp_im)
      im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
    except Exception as e:
      # FIX: was 'return', which aborted the entire run on a single bad file;
      # skip the file like every other per-item error path does
      log.debug(f'could not read: {fp_im}')
      skipped_files.append(fp_im)
      continue

    # filter out color or grayscale images
    if color_filter != color_filters['all']:
      try:
        is_gray = im_utils.is_grayscale(im)
        if is_gray and color_filter == color_filters['color']:
          log.debug('Skipping grayscale image: {}'.format(fp_im))
          continue
        # FIX: the 'gray' filter previously never excluded color images,
        # making '--color gray' a no-op
        if not is_gray and color_filter == color_filters['gray']:
          log.debug('Skipping color image: {}'.format(fp_im))
          continue
      except Exception as e:
        log.error('Could not check grayscale: {}'.format(fp_im))
        continue

    try:
      bboxes_norm = detector.detect(im_resized, pyramids=opt_pyramids, largest=opt_largest,
                                    zone=opt_zone, conf_thresh=opt_conf_thresh)
    except Exception as e:
      log.error('could not detect: {}'.format(fp_im))
      log.error('{}'.format(e))
      continue

    if len(bboxes_norm) == 0:
      skipped_files.append(fp_im)
      # FIX: log.warn() is a deprecated alias of log.warning()
      log.warning(f'no faces in: {fp_im}')
      log.warning(f'skipped: {len(skipped_files)}. found:{len(processed_files)} files')
    else:
      processed_files.append(fp_im)
      for bbox in bboxes_norm:
        roi = {
          'record_index': int(record.Index),
          'x': bbox.x,
          'y': bbox.y,
          'w': bbox.w,
          'h': bbox.h
        }
        data.append(roi)

    # if display optioned, draw detections and wait for a key
    if opt_display and len(bboxes_norm):
      # FIX: clamp the preview width once before drawing (was re-checked per
      # bbox) and drop an unused 'bbox.to_dim(dim)' call that referenced the
      # stale 'bbox' variable from the ROI loop above
      if im_resized.shape[1] > 1000:
        im_resized = im_utils.resize(im_resized, width=1000)
      for bbox_norm in bboxes_norm:
        im_resized = draw_utils.draw_bbox(im_resized, bbox_norm)

      # display and wait
      cv.imshow('', im_resized)
      display_utils.handle_keyboard()

  # create DataFrame and save to CSV
  file_utils.mkdirs(fp_out)
  df = pd.DataFrame.from_dict(data)
  df.index.name = 'index'
  df.to_csv(fp_out)

  # save the invoking command next to the CSV for reproducibility
  file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out))