summaryrefslogtreecommitdiff
path: root/megapixels/commands/cv
diff options
context:
space:
mode:
authorJules Laplace <julescarbon@gmail.com>2019-01-17 15:11:47 +0100
committerJules Laplace <julescarbon@gmail.com>2019-01-17 15:11:47 +0100
commit85ae432fb6c6c17292b319bca068e46a4ea81eb3 (patch)
tree4d0270fac0fdc7c1c1333af9c4bb82c6eb00669d /megapixels/commands/cv
parentc293006ba43944ffeb4dcab17b2256f3a5491a36 (diff)
parent03ad11fb2a3dcd425d50167b15d72d4e0ef536a2 (diff)
Merge branch 'master' of github.com:adamhrv/megapixels_dev
Diffstat (limited to 'megapixels/commands/cv')
-rw-r--r--megapixels/commands/cv/face_attributes.py136
-rw-r--r--megapixels/commands/cv/face_landmark_2d_68.py4
-rw-r--r--megapixels/commands/cv/face_pose.py11
-rw-r--r--megapixels/commands/cv/face_roi.py67
-rw-r--r--megapixels/commands/cv/face_vector.py46
-rw-r--r--megapixels/commands/cv/resize.py13
-rw-r--r--megapixels/commands/cv/resize_dataset.py149
7 files changed, 369 insertions, 57 deletions
diff --git a/megapixels/commands/cv/face_attributes.py b/megapixels/commands/cv/face_attributes.py
new file mode 100644
index 00000000..01fe3bd1
--- /dev/null
+++ b/megapixels/commands/cv/face_attributes.py
@@ -0,0 +1,136 @@
+"""
+
+"""
+
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--size', 'opt_size',
+ type=(int, int), default=cfg.DEFAULT_SIZE_FACE_DETECT,
+ help='Processing size for detection')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('-d', '--display', 'opt_display', is_flag=True,
+ help='Display image for debugging')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
+ opt_size, opt_slice, opt_force, opt_display):
+  """Estimates face attributes (apparent/real age and gender) for detected face ROIs"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+
+ from tqdm import tqdm
+ import numpy as np
+ import cv2 as cv
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.processors import face_age_gender
+ from app.models.data_store import DataStore
+ from app.models.bbox import BBox
+
+ # -------------------------------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+ # init face processors
+ age_estimator_apnt = face_age_gender.FaceAgeApparent()
+ age_estimator_real = face_age_gender.FaceAgeReal()
+ gender_estimator = face_age_gender.FaceGender()
+
+ # init filepaths
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # set file output path
+ metadata_type = types.Metadata.FACE_ATTRIBUTES
+ fp_out = data_store.metadata(metadata_type) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # -------------------------------------------------------------------------
+ # load filepath data
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
+ # load ROI data
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # slice if you want
+ if opt_slice:
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+ # group by image index (speedup if multiple faces per image)
+ df_img_groups = df_roi.groupby('record_index')
+ log.debug('processing {:,} groups'.format(len(df_img_groups)))
+
+ # store landmarks in list
+ results = []
+
+ # -------------------------------------------------------------------------
+ # iterate groups with file/record index as key
+
+ for record_index, df_img_group in tqdm(df_img_groups):
+
+ # access file_record DataSeries
+ file_record = df_record.iloc[record_index]
+
+ # load image
+ fp_im = data_store.face(file_record.subdir, file_record.fn, file_record.ext)
+ im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ dim = im_resized.shape[:2][::-1]
+
+ # iterate ROIs in this image
+ for roi_index, df_img in df_img_group.iterrows():
+
+ # find landmarks
+ bbox_norm = BBox.from_xywh(df_img.x, df_img.y, df_img.w, df_img.h)
+ bbox_dim = bbox_norm.to_dim(dim)
+
+ age_apnt = age_estimator_apnt.predict(im_resized, bbox_norm)
+ age_real = age_estimator_real.predict(im_resized, bbox_norm)
+ gender = gender_estimator.predict(im_resized, bbox_norm)
+
+ attr_obj = {
+ 'age_real':float(f'{age_real:.2f}'),
+ 'age_apparent': float(f'{age_apnt:.2f}'),
+ 'm': float(f'{gender["m"]:.4f}'),
+ 'f': float(f'{gender["f"]:.4f}'),
+ 'roi_index': roi_index
+ }
+ results.append(attr_obj)
+
+
+ # create DataFrame and save to CSV
+ file_utils.mkdirs(fp_out)
+ df = pd.DataFrame.from_dict(results)
+ df.index.name = 'index'
+ df.to_csv(fp_out)
+
+ # save script
+ file_utils.write_text(' '.join(sys.argv), '{}.sh'.format(fp_out)) \ No newline at end of file
diff --git a/megapixels/commands/cv/face_landmark_2d_68.py b/megapixels/commands/cv/face_landmark_2d_68.py
index e24d4b60..c6978a40 100644
--- a/megapixels/commands/cv/face_landmark_2d_68.py
+++ b/megapixels/commands/cv/face_landmark_2d_68.py
@@ -126,7 +126,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
points = landmark_detector.landmarks(im_resized, bbox)
points_norm = landmark_detector.normalize(points, dim)
- points_flat = landmark_detector.flatten(points_norm)
+ points_str = landmark_detector.to_str(points_norm)
# display if optioned
if opt_display:
@@ -137,7 +137,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
display_utils.handle_keyboard()
# add to results for CSV
- results.append(points_flat)
+ results.append({'vec': points_str, 'roi_index':roi_index})
# create DataFrame and save to CSV
diff --git a/megapixels/commands/cv/face_pose.py b/megapixels/commands/cv/face_pose.py
index 70ea1f30..cb7ec56c 100644
--- a/megapixels/commands/cv/face_pose.py
+++ b/megapixels/commands/cv/face_pose.py
@@ -92,7 +92,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
# load data
fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
- df_record = pd.read_csv(fp_record).set_index('index')
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
# load ROI data
fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
df_roi = pd.read_csv(fp_roi).set_index('index')
@@ -125,10 +125,11 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
#dim = (file_record.width, file_record.height)
dim = im_resized.shape[:2][::-1]
- bbox = BBox.from_xywh(x, y, w, h).to_dim(dim)
+ bbox_norm = BBox.from_xywh(x, y, w, h)
+ bbox_dim = bbox_norm.to_dim(dim)
# get pose
- landmarks = face_landmarks.landmarks(im_resized, bbox)
+ landmarks = face_landmarks.landmarks(im_resized, bbox_norm)
pose_data = face_pose.pose(landmarks, dim)
#pose_degrees = pose_data['degrees'] # only keep the degrees data
#pose_degrees['points_nose'] = pose_data
@@ -143,8 +144,8 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
# add image index and append to result CSV data
pose_data['roi_index'] = roi_index
for k, v in pose_data['points'].items():
- pose_data[f'point_{k}_x'] = v[0][0] / dim[0]
- pose_data[f'point_{k}_y'] = v[0][1] / dim[1]
+ pose_data[f'point_{k}_x'] = v[0] / dim[0]
+ pose_data[f'point_{k}_y'] = v[1] / dim[1]
# rearrange data structure for DataFrame
pose_data.pop('points')
diff --git a/megapixels/commands/cv/face_roi.py b/megapixels/commands/cv/face_roi.py
index 70fff401..e83b0f61 100644
--- a/megapixels/commands/cv/face_roi.py
+++ b/megapixels/commands/cv/face_roi.py
@@ -33,7 +33,7 @@ color_filters = {'color': 1, 'gray': 2, 'all': 3}
help='Output image size')
@click.option('-d', '--detector', 'opt_detector_type',
type=cfg.FaceDetectNetVar,
- default=click_utils.get_default(types.FaceDetectNet.DLIB_CNN),
+ default=click_utils.get_default(types.FaceDetectNet.CVDNN),
help=click_utils.show_help(types.FaceDetectNet))
@click.option('-g', '--gpu', 'opt_gpu', default=0,
help='GPU index')
@@ -97,31 +97,37 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu)
elif opt_detector_type == types.FaceDetectNet.DLIB_HOG:
detector = face_detector.DetectorDLIBHOG()
- elif opt_detector_type == types.FaceDetectNet.MTCNN:
- detector = face_detector.DetectorMTCNN(gpu=opt_gpu)
+ elif opt_detector_type == types.FaceDetectNet.MTCNN_TF:
+ detector = face_detector.DetectorMTCNN_TF(gpu=opt_gpu)
elif opt_detector_type == types.FaceDetectNet.HAAR:
log.error('{} not yet implemented'.format(opt_detector_type.name))
return
# get list of files to process
- fp_in = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
- df_records = pd.read_csv(fp_in).set_index('index')
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
if opt_slice:
- df_records = df_records[opt_slice[0]:opt_slice[1]]
- log.debug('processing {:,} files'.format(len(df_records)))
+ df_record = df_record[opt_slice[0]:opt_slice[1]]
+ log.debug('processing {:,} files'.format(len(df_record)))
# filter out grayscale
color_filter = color_filters[opt_color_filter]
# set largest flag, to keep all or only largest
- opt_largest = opt_largest == 'largest'
+ opt_largest = (opt_largest == 'largest')
data = []
+ skipped_files = []
+ processed_files = []
- for df_record in tqdm(df_records.itertuples(), total=len(df_records)):
+ for df_record in tqdm(df_record.itertuples(), total=len(df_record)):
fp_im = data_store.face(str(df_record.subdir), str(df_record.fn), str(df_record.ext))
- im = cv.imread(fp_im)
- im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ try:
+ im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ except Exception as e:
+ log.debug(f'could not read: {fp_im}')
+      continue
# filter out color or grayscale iamges
if color_filter != color_filters['all']:
try:
@@ -134,31 +140,38 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
continue
try:
- bboxes = detector.detect(im_resized, pyramids=opt_pyramids, largest=opt_largest,
+ bboxes_norm = detector.detect(im_resized, pyramids=opt_pyramids, largest=opt_largest,
zone=opt_zone, conf_thresh=opt_conf_thresh)
except Exception as e:
log.error('could not detect: {}'.format(fp_im))
log.error('{}'.format(e))
continue
- for bbox in bboxes:
- roi = {
- 'record_index': int(df_record.Index),
- 'x': bbox.x,
- 'y': bbox.y,
- 'w': bbox.w,
- 'h': bbox.h
- }
- data.append(roi)
- if len(bboxes) == 0:
+ if len(bboxes_norm) == 0:
+ skipped_files.append(fp_im)
log.warn(f'no faces in: {fp_im}')
-
+ log.warn(f'skipped: {len(skipped_files)}. found:{len(processed_files)} files')
+ else:
+ processed_files.append(fp_im)
+ for bbox in bboxes_norm:
+ roi = {
+ 'record_index': int(df_record.Index),
+ 'x': bbox.x,
+ 'y': bbox.y,
+ 'w': bbox.w,
+ 'h': bbox.h
+ }
+ data.append(roi)
+
+    # if display optioned
- if opt_display and len(bboxes):
+ if opt_display and len(bboxes_norm):
# draw each box
- for bbox in bboxes:
- bbox_dim = bbox.to_dim(im_resized.shape[:2][::-1])
- draw_utils.draw_bbox(im_resized, bbox_dim)
+ for bbox_norm in bboxes_norm:
+ dim = im_resized.shape[:2][::-1]
+        bbox_dim = bbox_norm.to_dim(dim)
+ if dim[0] > 1000:
+ im_resized = im_utils.resize(im_resized, width=1000)
+ im_resized = draw_utils.draw_bbox(im_resized, bbox_norm)
# display and wait
cv.imshow('', im_resized)
diff --git a/megapixels/commands/cv/face_vector.py b/megapixels/commands/cv/face_vector.py
index 4df647f5..cb155d08 100644
--- a/megapixels/commands/cv/face_vector.py
+++ b/megapixels/commands/cv/face_vector.py
@@ -1,5 +1,8 @@
"""
Converts ROIs to face vector
+NB: the VGG Face2 extractor should be used with MTCNN ROIs (not square)
+ the DLIB face extractor should be used with DLIB ROIs (square)
+see https://github.com/ox-vgg/vgg_face2 for TAR@FAR
"""
import click
@@ -24,12 +27,16 @@ from app.settings import app_cfg as cfg
show_default=True,
help=click_utils.show_help(types.Dataset))
@click.option('--size', 'opt_size',
- type=(int, int), default=(300, 300),
+ type=(int, int), default=cfg.DEFAULT_SIZE_FACE_DETECT,
help='Output image size')
+@click.option('-e', '--extractor', 'opt_extractor',
+ default=click_utils.get_default(types.FaceExtractor.VGG),
+ type=cfg.FaceExtractorVar,
+ help='Type of extractor framework/network to use')
@click.option('-j', '--jitters', 'opt_jitters', default=cfg.DLIB_FACEREC_JITTERS,
- help='Number of jitters')
-@click.option('-p', '--padding', 'opt_padding', default=cfg.DLIB_FACEREC_PADDING,
- help='Percentage padding')
+  help='Number of jitters (only for dlib)')
+@click.option('-p', '--padding', 'opt_padding', default=cfg.FACEREC_PADDING,
+ help='Percentage ROI padding')
@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
help='Slice list of files')
@click.option('-f', '--force', 'opt_force', is_flag=True,
@@ -38,7 +45,7 @@ from app.settings import app_cfg as cfg
help='GPU index')
@click.pass_context
def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
- opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding):
+ opt_extractor, opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding):
"""Converts face ROIs to vectors"""
import sys
@@ -56,7 +63,7 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
from app.models.bbox import BBox
from app.models.data_store import DataStore
from app.utils import logger_utils, file_utils, im_utils
- from app.processors import face_recognition
+ from app.processors import face_extractor
# -------------------------------------------------
@@ -73,11 +80,15 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
return
# init face processors
- facerec = face_recognition.RecognitionDLIB()
+ if opt_extractor == types.FaceExtractor.DLIB:
+ log.debug('set dlib')
+ extractor = face_extractor.ExtractorDLIB(gpu=opt_gpu, jitters=opt_jitters)
+ elif opt_extractor == types.FaceExtractor.VGG:
+ extractor = face_extractor.ExtractorVGG()
# load data
fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
- df_record = pd.read_csv(fp_record).set_index('index')
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
df_roi = pd.read_csv(fp_roi).set_index('index')
@@ -85,7 +96,8 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
df_roi = df_roi[opt_slice[0]:opt_slice[1]]
# -------------------------------------------------
- # process here
+ # process images
+
df_img_groups = df_roi.groupby('record_index')
log.debug('processing {:,} groups'.format(len(df_img_groups)))
@@ -95,21 +107,21 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
ds_record = df_record.iloc[record_index]
fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
im = cv.imread(fp_im)
+ im = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
for roi_index, df_img in df_img_group.iterrows():
# get bbox
x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
dim = (ds_record.width, ds_record.height)
- #dim = im.shape[:2][::-1]
# get face vector
- bbox_dim = BBox.from_xywh(x, y, w, h).to_dim(dim) # convert to int real dimensions
+ bbox = BBox.from_xywh(x, y, w, h) # norm
# compute vec
- # padding=opt_padding not yet implemented in dlib===19.16 but merged in master
- vec = facerec.vec(im, bbox_dim, jitters=opt_jitters)
- vec_flat = facerec.flatten(vec)
- vec_flat['roi_index'] = roi_index
- vec_flat['record_index'] = record_index
- vecs.append(vec_flat)
+ vec = extractor.extract(im, bbox) # use normalized BBox
+ vec_str = extractor.to_str(vec)
+ vec_obj = {'vec':vec_str, 'roi_index': roi_index, 'record_index':record_index}
+ vecs.append(vec_obj)
+ # -------------------------------------------------
+ # save data
# create DataFrame and save to CSV
df = pd.DataFrame.from_dict(vecs)
diff --git a/megapixels/commands/cv/resize.py b/megapixels/commands/cv/resize.py
index dcd621b3..7409ee6f 100644
--- a/megapixels/commands/cv/resize.py
+++ b/megapixels/commands/cv/resize.py
@@ -49,7 +49,7 @@ centerings = {
help='File glob ext')
@click.option('--size', 'opt_size',
type=(int, int), default=(256, 256),
- help='Output image size (square)')
+ help='Max output size')
@click.option('--method', 'opt_scale_method',
type=click.Choice(methods.keys()),
default='lanczos',
@@ -88,7 +88,7 @@ def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method,
# -------------------------------------------------
# process here
- def pool_resize(fp_im, opt_size, scale_method, centering):
+ def pool_resize(fp_im, opt_size, scale_method):
# Threaded image resize function
try:
pbar.update(1)
@@ -100,7 +100,7 @@ def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method,
log.error(e)
return False
- im = ImageOps.fit(im, opt_size, method=scale_method, centering=centering)
+ #im = ImageOps.fit(im, opt_size, method=scale_method, centering=centering)
if opt_equalize:
im_np = im_utils.pil2np(im)
@@ -117,8 +117,8 @@ def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method,
except:
return False
- centering = centerings[opt_center]
- scale_method = methods[opt_scale_method]
+ #centering = centerings[opt_center]
+ #scale_method = methods[opt_scale_method]
# get list of files to process
fp_ims = glob(join(opt_dir_in, '*.{}'.format(opt_glob_ext)))
@@ -132,7 +132,8 @@ def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method,
# setup multithreading
pbar = tqdm(total=len(fp_ims))
- pool_resize = partial(pool_resize, opt_size=opt_size, scale_method=scale_method, centering=centering)
+ #pool_resize = partial(pool_resize, opt_size=opt_size, scale_method=scale_method, centering=centering)
+ pool_resize = partial(pool_resize, opt_size=opt_size)
#result_list = pool.map(prod_x, data_list)
pool = ThreadPool(opt_threads)
with tqdm(total=len(fp_ims)) as pbar:
diff --git a/megapixels/commands/cv/resize_dataset.py b/megapixels/commands/cv/resize_dataset.py
new file mode 100644
index 00000000..3a6ec15f
--- /dev/null
+++ b/megapixels/commands/cv/resize_dataset.py
@@ -0,0 +1,149 @@
+"""
+Crop images to prepare for training
+"""
+
+import click
+import cv2 as cv
+from PIL import Image, ImageOps, ImageFilter
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+cv_resize_algos = {
+ 'area': cv.INTER_AREA,
+ 'lanco': cv.INTER_LANCZOS4,
+ 'linear': cv.INTER_LINEAR,
+ 'linear_exact': cv.INTER_LINEAR_EXACT,
+ 'nearest': cv.INTER_NEAREST
+}
+"""
+Filter Q-Down Q-Up Speed
+NEAREST ⭐⭐⭐⭐⭐
+BOX ⭐ ⭐⭐⭐⭐
+BILINEAR ⭐ ⭐ ⭐⭐⭐
+HAMMING ⭐⭐ ⭐⭐⭐
+BICUBIC ⭐⭐⭐ ⭐⭐⭐ ⭐⭐
+LANCZOS ⭐⭐⭐⭐ ⭐⭐⭐⭐ ⭐
+"""
+pil_resize_algos = {
+ 'antialias': Image.ANTIALIAS,
+ 'lanczos': Image.LANCZOS,
+ 'bicubic': Image.BICUBIC,
+ 'hamming': Image.HAMMING,
+  'bilinear': Image.BILINEAR,
+ 'box': Image.BOX,
+ 'nearest': Image.NEAREST
+ }
+
+@click.command()
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+ help='Output directory')
+@click.option('-e', '--ext', 'opt_glob_ext',
+ default='png', type=click.Choice(['jpg', 'png']),
+ help='File glob ext')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(256, 256),
+ help='Output image size max (w,h)')
+@click.option('--interp', 'opt_interp_algo',
+ type=click.Choice(pil_resize_algos.keys()),
+ default='bicubic',
+ help='Interpolation resizing algorithms')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice the input list')
+@click.option('-t', '--threads', 'opt_threads', default=8,
+ help='Number of threads')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+ help='Use glob recursion (slower)')
+@click.pass_context
+def cli(ctx, opt_dataset, opt_data_store, opt_dir_out, opt_glob_ext, opt_size, opt_interp_algo,
+ opt_slice, opt_threads, opt_recursive):
+ """Resize dataset images"""
+
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+ from tqdm import tqdm
+ from multiprocessing.dummy import Pool as ThreadPool
+ from functools import partial
+ import pandas as pd
+ import numpy as np
+
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init
+
+ log = logger_utils.Logger.getLogger()
+
+
+ # -------------------------------------------------
+ # process here
+
+ def pool_resize(fp_in, dir_in, dir_out, im_size, interp_algo):
+ # Threaded image resize function
+ pbar.update(1)
+ try:
+ im = Image.open(fp_in).convert('RGB')
+ im.verify() # throws error if image is corrupt
+ im.thumbnail(im_size, interp_algo)
+ fp_out = fp_in.replace(dir_in, dir_out)
+ file_utils.mkdirs(fp_out)
+ im.save(fp_out, quality=100)
+ except Exception as e:
+ log.warn(f'Could not open: {fp_in}, Error: {e}')
+ return False
+ return True
+
+
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_records = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_records = pd.read_csv(fp_records, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
+ dir_in = data_store.media_images_original()
+
+ # get list of files to process
+ #fp_ims = file_utils.glob_multi(opt_dir_in, ['jpg', 'png'], recursive=opt_recursive)
+ fp_ims = []
+ for ds_record in df_records.itertuples():
+ fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ fp_ims.append(fp_im)
+
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ if not fp_ims:
+ log.error('No images. Try with "--recursive"')
+ return
+ log.info(f'processing {len(fp_ims):,} images')
+
+ # algorithm to use for resizing
+ interp_algo = pil_resize_algos[opt_interp_algo]
+  log.info(f'using {interp_algo} for interpolation')
+
+ # ensure output dir exists
+ file_utils.mkdirs(opt_dir_out)
+
+ # setup multithreading
+ pbar = tqdm(total=len(fp_ims))
+ # fixed arguments for pool function
+ map_pool_resize = partial(pool_resize, dir_in=dir_in, dir_out=opt_dir_out, im_size=opt_size, interp_algo=interp_algo)
+ #result_list = pool.map(prod_x, data_list) # simple
+ pool = ThreadPool(opt_threads)
+ # start multithreading
+ with tqdm(total=len(fp_ims)) as pbar:
+ results = pool.map(map_pool_resize, fp_ims)
+ # end multithreading
+ pbar.close()
+
+ log.info(f'Resized: {results.count(True)} / {len(fp_ims)} images') \ No newline at end of file