summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authoradamhrv <adam@ahprojects.com>2019-01-17 11:26:41 +0100
committeradamhrv <adam@ahprojects.com>2019-01-17 11:26:41 +0100
commitcb4d6d6f5be213edbc4f3b1e4452e5b7ce5e9378 (patch)
treea6a66d408e68c9a1401cc729a72952ea8f200762
parenta672dfdfdbac7cdac43e22c5d0bf29550770e2ad (diff)
updates for batch processing
-rw-r--r--megapixels/app/models/data_store.py3
-rw-r--r--megapixels/app/models/dataset.py67
-rw-r--r--megapixels/app/processors/face_detector.py30
-rw-r--r--megapixels/app/settings/app_cfg.py7
-rw-r--r--megapixels/app/settings/types.py6
-rw-r--r--megapixels/app/utils/display_utils.py6
-rw-r--r--megapixels/commands/cv/face_attributes.py19
-rw-r--r--megapixels/commands/cv/face_pose.py2
-rw-r--r--megapixels/commands/cv/face_roi.py61
-rw-r--r--megapixels/commands/cv/face_vector.py3
-rw-r--r--megapixels/commands/cv/resize.py13
-rw-r--r--megapixels/commands/cv/resize_dataset.py149
-rw-r--r--megapixels/commands/datasets/file_record.py2
-rw-r--r--megapixels/commands/demo/face_search.py11
-rw-r--r--megapixels/notebooks/face_analysis/face_recognition_vgg.ipynb36
15 files changed, 335 insertions, 80 deletions
diff --git a/megapixels/app/models/data_store.py b/megapixels/app/models/data_store.py
index 626c9da4..a8d6916f 100644
--- a/megapixels/app/models/data_store.py
+++ b/megapixels/app/models/data_store.py
@@ -24,6 +24,9 @@ class DataStore:
def metadata_dir(self):
return join(self.dir_metadata)
+ def media_dir(self):
+ return join(self.dir_media)
+
def media_images_original(self):
return join(self.dir_media, 'original')
diff --git a/megapixels/app/models/dataset.py b/megapixels/app/models/dataset.py
index bbef9ff5..1b91467b 100644
--- a/megapixels/app/models/dataset.py
+++ b/megapixels/app/models/dataset.py
@@ -32,7 +32,7 @@ class Dataset:
self.data_store = DataStore(opt_data_store, self._dataset_type)
self.data_store_s3 = DataStoreS3(self._dataset_type)
- def load_face_vectors(self):
+ def _load_face_vectors(self):
metadata_type = types.Metadata.FACE_VECTOR
fp_csv = self.data_store.metadata(metadata_type)
self.log.info(f'loading: {fp_csv}')
@@ -51,18 +51,17 @@ class Dataset:
self.log.error(f'File not found: {fp_csv}. Exiting.')
sys.exit()
- def load_records(self):
+ def _load_file_records(self):
metadata_type = types.Metadata.FILE_RECORD
fp_csv = self.data_store.metadata(metadata_type)
self.log.info(f'loading: {fp_csv}')
if Path(fp_csv).is_file():
- self._metadata[metadata_type] = pd.read_csv(fp_csv, dtype={'fn':str}).set_index('index')
+ self._metadata[metadata_type] = pd.read_csv(fp_csv, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
else:
self.log.error(f'File not found: {fp_csv}. Exiting.')
sys.exit()
- def load_identities(self):
- metadata_type = types.Metadata.IDENTITY
+ def _load_metadata(self, metadata_type):
fp_csv = self.data_store.metadata(metadata_type)
self.log.info(f'loading: {fp_csv}')
if Path(fp_csv).is_file():
@@ -70,6 +69,14 @@ class Dataset:
else:
self.log.error(f'File not found: {fp_csv}. Exiting.')
sys.exit()
+
+ def load_metadata(self, metadata_type):
+ if metadata_type == types.Metadata.FILE_RECORD:
+ self._load_file_records()
+ elif metadata_type == types.Metadata.FACE_VECTOR:
+ self._load_face_vectors()
+ else:
+ self._load_metadata(metadata_type)
def metadata(self, opt_metadata_type):
return self._metadata.get(opt_metadata_type, None)
@@ -82,11 +89,11 @@ class Dataset:
# get identity meta
df_identity = self._metadata[types.Metadata.IDENTITY]
# future datasets can have multiple identities per images
- ds_identities = df_identity.iloc[identity_index]
+ #ds_identities = df_identity.iloc[identity_index]
# get filepath and S3 url
fp_im = self.data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
s3_url = self.data_store_s3.face(ds_record.uuid)
- image_record = ImageRecord(ds_record, fp_im, s3_url, ds_identities=ds_identities)
+ image_record = ImageRecord(ds_record, fp_im, s3_url)
return image_record
def vector_to_record(self, record_index):
@@ -149,7 +156,14 @@ class Dataset:
df_vector = self._metadata[types.Metadata.FACE_VECTOR]
df_record = self._metadata[types.Metadata.FILE_RECORD]
-
+ if types.Metadata.IDENTITY in self._metadata.keys():
+ df_identity = self._metadata[types.Metadata.IDENTITY]
+ else:
+ df_identity = None
+ df_roi = self._metadata[types.Metadata.FACE_ROI]
+
+ identities = []
+
for match_idx in match_idxs:
# get the corresponding face vector row
roi_index = self._face_vector_roi_idxs[match_idx]
@@ -158,7 +172,18 @@ class Dataset:
self.log.debug(f'find match index: {match_idx}, --> roi_index: {roi_index}')
fp_im = self.data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
s3_url = self.data_store_s3.face(ds_record.uuid)
- image_record = ImageRecord(ds_record, fp_im, s3_url)
+ identities = []
+ if types.Metadata.IDENTITY in self._metadata.keys():
+ ds_id = df_identity.loc[df_identity['identity_key'] == ik].iloc[0]
+ identity = Identity(idx,
+ name_display=ds_id.name_display,
+ name_full=ds_id.name_full,
+ description=ds_id.description,
+ gender=ds_id.gender,
+ image_index=ds_id.image_index,
+ identity_key=ds_id.identity_key)
+ identities.append(identity)
+ image_record = ImageRecord(ds_record, fp_im, s3_url, identities=identities)
image_records.append(image_record)
return image_records
@@ -191,19 +216,20 @@ class Dataset:
class ImageRecord:
- def __init__(self, ds_record, fp, url, ds_rois=None, ds_identities=None):
+ def __init__(self, ds_record, fp, url, rois=None, identities=None):
# maybe more other meta will go there
self.image_index = ds_record.index
self.sha256 = ds_record.sha256
self.uuid = ds_record.uuid
self.filepath = fp
+ self.width = ds_record.width
+ self.height = ds_record.height
self.url = url
- self._identities = []
+ self.rois = rois
+ self.identities = identities
# image records contain ROIs
# ROIs are linked to identities
- #self._identities = [Identity(x) for x in ds_identities]
-
@property
def identity(self, index):
return self._identity
@@ -215,7 +241,7 @@ class ImageRecord:
log.info(f'sha256: {self.sha256}')
log.info(f'UUID: {self.uuid}')
log.info(f'S3 url: {self.url}')
- for identity in self._identities:
+ for identity in self.identities:
log.info(f'fullname: {identity.fullname}')
log.info(f'description: {identity.description}')
log.info(f'gender: {identity.gender}')
@@ -224,13 +250,10 @@ class ImageRecord:
class Identity:
- def __init__(self, idx, name='NA', desc='NA', gender='NA', n_images=1,
- url='NA', age='NA', nationality='NA'):
+ def __init__(self, idx, name_display=None, name_full=None, description=None, gender=None, roi=None):
self.index = idx
- self.name = name
- self.description = desc
+ self.name_display = name_display
+ self.name_full = name_full
+ self.description = description
self.gender = gender
- self.n_images = n_images
- self.url = url
- self.age = age
- self.nationality = nationality
+ self.roi = roi
diff --git a/megapixels/app/processors/face_detector.py b/megapixels/app/processors/face_detector.py
index fbf91071..7b5310c5 100644
--- a/megapixels/app/processors/face_detector.py
+++ b/megapixels/app/processors/face_detector.py
@@ -69,6 +69,7 @@ class DetectorMTCNN_TF:
# pip install mtcnn
dnn_size = (300, 300)
+ conf_thresh = 0.9
def __init__(self, size=(400,400), gpu=0):
self.log = logger_utils.Logger.getLogger()
@@ -84,17 +85,33 @@ class DetectorMTCNN_TF:
:param im: (numpy.ndarray) image
:returns list of BBox
'''
+
bboxes = []
dnn_size = self.dnn_size if size is None else size
+ conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
im = im_utils.resize(im, width=dnn_size[0], height=dnn_size[1])
dim = im.shape[:2][::-1]
dets = self.detector.detect_faces(im)
+ '''
+ {
+ 'box': [4, 140, 14, 18],
+ 'confidence': 0.9588413834571838,
+ 'keypoints': {
+ 'left_eye': (8, 147),
+ 'right_eye': (14, 146),
+ 'nose': (12, 151),
+ 'mouth_left': (9, 155),
+ 'mouth_right': (14, 154)
+ }
+ }
+ '''
for det in dets:
rect = det['box']
- #keypoints = det['keypoints'] # not using here. see 'face_landmarks.py'
- bbox = BBox.from_xywh_dim(*rect, dim)
- bboxes.append(bbox)
+ conf = det['confidence']
+ if conf > conf_thresh:
+ bbox = BBox.from_xywh_dim(*rect, dim)
+ bboxes.append(bbox)
if largest and len(bboxes) > 1:
# only keep largest
@@ -222,8 +239,11 @@ class DetectorCVDNN:
bboxes = []
for i in range(0, net_outputs.shape[2]):
- conf = net_outputs[0, 0, i, 2]
- if conf > conf_thresh:
+ conf = float(net_outputs[0, 0, i, 2])
+ # BUG: this face detector creates ghost face detections in stage-left from nose-bottom neck
+ # temp fix is to eliminate ROI extending outside of frame
+ bounds = np.array(net_outputs[0, 0, i, 3:7])
+ if conf > conf_thresh and np.all(bounds < 1):
rect_norm = net_outputs[0, 0, i, 3:7]
bboxes.append(BBox(*rect_norm))
diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py
index 42e37b7a..c256635b 100644
--- a/megapixels/app/settings/app_cfg.py
+++ b/megapixels/app/settings/app_cfg.py
@@ -110,6 +110,13 @@ POSE_MINMAX_PITCH = (-10,10)
POSE_MINMAX_YAW = (-40,40)
POSE_MINMAX_ROLL = (-35,35)
POSE_MINMAX_PITCH = (-25,25)
+
+# -----------------------------------------------------------------------------
+# Pandas data
+# -----------------------------------------------------------------------------
+
+FILE_RECORD_DTYPES = {'fn':str, 'subdir': str}
+
# -----------------------------------------------------------------------------
# Logging options exposed for custom click Params
# -----------------------------------------------------------------------------
diff --git a/megapixels/app/settings/types.py b/megapixels/app/settings/types.py
index 9325fc3c..7a34ccc2 100644
--- a/megapixels/app/settings/types.py
+++ b/megapixels/app/settings/types.py
@@ -43,10 +43,12 @@ class LogLevel(Enum):
class Metadata(Enum):
IDENTITY, FILE_RECORD, FACE_VECTOR, FACE_POSE, \
- FACE_ROI, FACE_LANDMARK_2D_68, FACE_LANDMARK_2D_5,FACE_LANDMARK_3D_68, FACE_ATTRIBUTES = range(9)
+ FACE_ROI, FACE_LANDMARK_2D_68, FACE_LANDMARK_2D_5,FACE_LANDMARK_3D_68, \
+ FACE_ATTRIBUTES = range(9)
class Dataset(Enum):
- LFW, VGG_FACE2, MSCELEB, UCCS, UMD_FACES, SCUT_FBP, UCF_SELFIE, UTK = range(8)
+ LFW, VGG_FACE2, MSCELEB, UCCS, UMD_FACES, SCUT_FBP, UCF_SELFIE, UTK, \
+ CASIA_WEBFACE, AFW, PUBFIG83, HELEN, PIPA, MEGAFACE = range(14)
# ---------------------------------------------------------------------
diff --git a/megapixels/app/utils/display_utils.py b/megapixels/app/utils/display_utils.py
index e72cc0f0..43328ae9 100644
--- a/megapixels/app/utils/display_utils.py
+++ b/megapixels/app/utils/display_utils.py
@@ -15,5 +15,7 @@ def handle_keyboard(delay_amt=1):
if k == 27 or k == ord('q'): # ESC
cv.destroyAllWindows()
sys.exit()
- #else:
- #log.info('Press Q, q, or ESC to exit')
+ elif k == 32 or k == 83: # 83 = right arrow
+ break
+ elif k != 255:
+ log.debug(f'k: {k}')
diff --git a/megapixels/commands/cv/face_attributes.py b/megapixels/commands/cv/face_attributes.py
index bb7978f7..01fe3bd1 100644
--- a/megapixels/commands/cv/face_attributes.py
+++ b/megapixels/commands/cv/face_attributes.py
@@ -77,7 +77,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
# -------------------------------------------------------------------------
# load filepath data
fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
- df_record = pd.read_csv(fp_record, dtype={'fn':str}).set_index('index')
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
# load ROI data
fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
df_roi = pd.read_csv(fp_roi).set_index('index')
@@ -112,18 +112,15 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
bbox_norm = BBox.from_xywh(df_img.x, df_img.y, df_img.w, df_img.h)
bbox_dim = bbox_norm.to_dim(dim)
- #age_apnt = age_estimator_apnt.predict(im_resized, bbox_norm)
- #age_real = age_estimator_real.predict(im_resized, bbox_norm)
- #gender = gender_estimator.predict(im_resized, bbox_norm)
+ age_apnt = age_estimator_apnt.predict(im_resized, bbox_norm)
+ age_real = age_estimator_real.predict(im_resized, bbox_norm)
+ gender = gender_estimator.predict(im_resized, bbox_norm)
- # attr_obj = {
- # 'age_real':float(f'{age_real:.2f}'),
- # 'age_apparent': float(f'{age_apnt:.2f}'),
- # 'm': float(f'{gender["m"]:.4f}'),
- # 'f': float(f'{gender["f"]:.4f}'),
- # 'roi_index': roi_index
- # }
attr_obj = {
+ 'age_real':float(f'{age_real:.2f}'),
+ 'age_apparent': float(f'{age_apnt:.2f}'),
+ 'm': float(f'{gender["m"]:.4f}'),
+ 'f': float(f'{gender["f"]:.4f}'),
'roi_index': roi_index
}
results.append(attr_obj)
diff --git a/megapixels/commands/cv/face_pose.py b/megapixels/commands/cv/face_pose.py
index 75db603b..cb7ec56c 100644
--- a/megapixels/commands/cv/face_pose.py
+++ b/megapixels/commands/cv/face_pose.py
@@ -92,7 +92,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
# load data
fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
- df_record = pd.read_csv(fp_record, dtype={'fn':str}).set_index('index')
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
# load ROI data
fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
df_roi = pd.read_csv(fp_roi).set_index('index')
diff --git a/megapixels/commands/cv/face_roi.py b/megapixels/commands/cv/face_roi.py
index 950936cf..e83b0f61 100644
--- a/megapixels/commands/cv/face_roi.py
+++ b/megapixels/commands/cv/face_roi.py
@@ -105,23 +105,29 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
# get list of files to process
- fp_in = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
- df_records = pd.read_csv(fp_in, dtype={'fn':str}).set_index('index')
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
if opt_slice:
- df_records = df_records[opt_slice[0]:opt_slice[1]]
- log.debug('processing {:,} files'.format(len(df_records)))
+ df_record = df_record[opt_slice[0]:opt_slice[1]]
+ log.debug('processing {:,} files'.format(len(df_record)))
# filter out grayscale
color_filter = color_filters[opt_color_filter]
# set largest flag, to keep all or only largest
- opt_largest = opt_largest == 'largest'
+ opt_largest = (opt_largest == 'largest')
data = []
+ skipped_files = []
+ processed_files = []
- for df_record in tqdm(df_records.itertuples(), total=len(df_records)):
+ for df_record in tqdm(df_record.itertuples(), total=len(df_record)):
fp_im = data_store.face(str(df_record.subdir), str(df_record.fn), str(df_record.ext))
- im = cv.imread(fp_im)
- im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ try:
+ im = cv.imread(fp_im)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ except Exception as e:
+ log.debug(f'could not read: {fp_im}')
+ return
# filter out color or grayscale images
if color_filter != color_filters['all']:
try:
@@ -134,31 +140,38 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
continue
try:
- bboxes = detector.detect(im_resized, pyramids=opt_pyramids, largest=opt_largest,
+ bboxes_norm = detector.detect(im_resized, pyramids=opt_pyramids, largest=opt_largest,
zone=opt_zone, conf_thresh=opt_conf_thresh)
except Exception as e:
log.error('could not detect: {}'.format(fp_im))
log.error('{}'.format(e))
continue
- for bbox in bboxes:
- roi = {
- 'record_index': int(df_record.Index),
- 'x': bbox.x,
- 'y': bbox.y,
- 'w': bbox.w,
- 'h': bbox.h
- }
- data.append(roi)
- if len(bboxes) == 0:
+ if len(bboxes_norm) == 0:
+ skipped_files.append(fp_im)
log.warn(f'no faces in: {fp_im}')
-
+ log.warn(f'skipped: {len(skipped_files)}. found:{len(processed_files)} files')
+ else:
+ processed_files.append(fp_im)
+ for bbox in bboxes_norm:
+ roi = {
+ 'record_index': int(df_record.Index),
+ 'x': bbox.x,
+ 'y': bbox.y,
+ 'w': bbox.w,
+ 'h': bbox.h
+ }
+ data.append(roi)
+
+ # if display optioned
- if opt_display and len(bboxes):
+ if opt_display and len(bboxes_norm):
# draw each box
- for bbox in bboxes:
- bbox_dim = bbox.to_dim(im_resized.shape[:2][::-1])
- draw_utils.draw_bbox(im_resized, bbox_dim)
+ for bbox_norm in bboxes_norm:
+ dim = im_resized.shape[:2][::-1]
+ bbox_dim = bbox.to_dim(dim)
+ if dim[0] > 1000:
+ im_resized = im_utils.resize(im_resized, width=1000)
+ im_resized = draw_utils.draw_bbox(im_resized, bbox_norm)
# display and wait
cv.imshow('', im_resized)
diff --git a/megapixels/commands/cv/face_vector.py b/megapixels/commands/cv/face_vector.py
index 9a527bc3..cb155d08 100644
--- a/megapixels/commands/cv/face_vector.py
+++ b/megapixels/commands/cv/face_vector.py
@@ -88,7 +88,7 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
# load data
fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
- df_record = pd.read_csv(fp_record, dtype={'fn':str}).set_index('index')
+ df_record = pd.read_csv(fp_record, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
df_roi = pd.read_csv(fp_roi).set_index('index')
@@ -107,6 +107,7 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
ds_record = df_record.iloc[record_index]
fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
im = cv.imread(fp_im)
+ im = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
for roi_index, df_img in df_img_group.iterrows():
# get bbox
x, y, w, h = df_img.x, df_img.y, df_img.w, df_img.h
diff --git a/megapixels/commands/cv/resize.py b/megapixels/commands/cv/resize.py
index dcd621b3..7409ee6f 100644
--- a/megapixels/commands/cv/resize.py
+++ b/megapixels/commands/cv/resize.py
@@ -49,7 +49,7 @@ centerings = {
help='File glob ext')
@click.option('--size', 'opt_size',
type=(int, int), default=(256, 256),
- help='Output image size (square)')
+ help='Max output size')
@click.option('--method', 'opt_scale_method',
type=click.Choice(methods.keys()),
default='lanczos',
@@ -88,7 +88,7 @@ def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method,
# -------------------------------------------------
# process here
- def pool_resize(fp_im, opt_size, scale_method, centering):
+ def pool_resize(fp_im, opt_size, scale_method):
# Threaded image resize function
try:
pbar.update(1)
@@ -100,7 +100,7 @@ def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method,
log.error(e)
return False
- im = ImageOps.fit(im, opt_size, method=scale_method, centering=centering)
+ #im = ImageOps.fit(im, opt_size, method=scale_method, centering=centering)
if opt_equalize:
im_np = im_utils.pil2np(im)
@@ -117,8 +117,8 @@ def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method,
except:
return False
- centering = centerings[opt_center]
- scale_method = methods[opt_scale_method]
+ #centering = centerings[opt_center]
+ #scale_method = methods[opt_scale_method]
# get list of files to process
fp_ims = glob(join(opt_dir_in, '*.{}'.format(opt_glob_ext)))
@@ -132,7 +132,8 @@ def cli(ctx, opt_dir_in, opt_dir_out, opt_glob_ext, opt_size, opt_scale_method,
# setup multithreading
pbar = tqdm(total=len(fp_ims))
- pool_resize = partial(pool_resize, opt_size=opt_size, scale_method=scale_method, centering=centering)
+ #pool_resize = partial(pool_resize, opt_size=opt_size, scale_method=scale_method, centering=centering)
+ pool_resize = partial(pool_resize, opt_size=opt_size)
#result_list = pool.map(prod_x, data_list)
pool = ThreadPool(opt_threads)
with tqdm(total=len(fp_ims)) as pbar:
diff --git a/megapixels/commands/cv/resize_dataset.py b/megapixels/commands/cv/resize_dataset.py
new file mode 100644
index 00000000..3a6ec15f
--- /dev/null
+++ b/megapixels/commands/cv/resize_dataset.py
@@ -0,0 +1,149 @@
+"""
+Crop images to prepare for training
+"""
+
+import click
+import cv2 as cv
+from PIL import Image, ImageOps, ImageFilter
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+cv_resize_algos = {
+ 'area': cv.INTER_AREA,
+ 'lanco': cv.INTER_LANCZOS4,
+ 'linear': cv.INTER_LINEAR,
+ 'linear_exact': cv.INTER_LINEAR_EXACT,
+ 'nearest': cv.INTER_NEAREST
+}
+"""
+Filter Q-Down Q-Up Speed
+NEAREST ⭐⭐⭐⭐⭐
+BOX ⭐ ⭐⭐⭐⭐
+BILINEAR ⭐ ⭐ ⭐⭐⭐
+HAMMING ⭐⭐ ⭐⭐⭐
+BICUBIC ⭐⭐⭐ ⭐⭐⭐ ⭐⭐
+LANCZOS ⭐⭐⭐⭐ ⭐⭐⭐⭐ ⭐
+"""
+pil_resize_algos = {
+ 'antialias': Image.ANTIALIAS,
+ 'lanczos': Image.LANCZOS,
+ 'bicubic': Image.BICUBIC,
+ 'hamming': Image.HAMMING,
+ 'bileaner': Image.BILINEAR,
+ 'box': Image.BOX,
+ 'nearest': Image.NEAREST
+ }
+
+@click.command()
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.HDD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-o', '--output', 'opt_dir_out', required=True,
+ help='Output directory')
+@click.option('-e', '--ext', 'opt_glob_ext',
+ default='png', type=click.Choice(['jpg', 'png']),
+ help='File glob ext')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(256, 256),
+ help='Output image size max (w,h)')
+@click.option('--interp', 'opt_interp_algo',
+ type=click.Choice(pil_resize_algos.keys()),
+ default='bicubic',
+ help='Interpolation resizing algorithms')
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice the input list')
+@click.option('-t', '--threads', 'opt_threads', default=8,
+ help='Number of threads')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+ help='Use glob recursion (slower)')
+@click.pass_context
+def cli(ctx, opt_dataset, opt_data_store, opt_dir_out, opt_glob_ext, opt_size, opt_interp_algo,
+ opt_slice, opt_threads, opt_recursive):
+ """Resize dataset images"""
+
+ import os
+ from os.path import join
+ from pathlib import Path
+ from glob import glob
+ from tqdm import tqdm
+ from multiprocessing.dummy import Pool as ThreadPool
+ from functools import partial
+ import pandas as pd
+ import numpy as np
+
+ from app.utils import logger_utils, file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init
+
+ log = logger_utils.Logger.getLogger()
+
+
+ # -------------------------------------------------
+ # process here
+
+ def pool_resize(fp_in, dir_in, dir_out, im_size, interp_algo):
+ # Threaded image resize function
+ pbar.update(1)
+ try:
+ im = Image.open(fp_in).convert('RGB')
+ im.verify() # throws error if image is corrupt
+ im.thumbnail(im_size, interp_algo)
+ fp_out = fp_in.replace(dir_in, dir_out)
+ file_utils.mkdirs(fp_out)
+ im.save(fp_out, quality=100)
+ except Exception as e:
+ log.warn(f'Could not open: {fp_in}, Error: {e}')
+ return False
+ return True
+
+
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_records = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_records = pd.read_csv(fp_records, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
+ dir_in = data_store.media_images_original()
+
+ # get list of files to process
+ #fp_ims = file_utils.glob_multi(opt_dir_in, ['jpg', 'png'], recursive=opt_recursive)
+ fp_ims = []
+ for ds_record in df_records.itertuples():
+ fp_im = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ fp_ims.append(fp_im)
+
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ if not fp_ims:
+ log.error('No images. Try with "--recursive"')
+ return
+ log.info(f'processing {len(fp_ims):,} images')
+
+ # algorithm to use for resizing
+ interp_algo = pil_resize_algos[opt_interp_algo]
+ log.info(f'using {interp_algo} for interpoloation')
+
+ # ensure output dir exists
+ file_utils.mkdirs(opt_dir_out)
+
+ # setup multithreading
+ pbar = tqdm(total=len(fp_ims))
+ # fixed arguments for pool function
+ map_pool_resize = partial(pool_resize, dir_in=dir_in, dir_out=opt_dir_out, im_size=opt_size, interp_algo=interp_algo)
+ #result_list = pool.map(prod_x, data_list) # simple
+ pool = ThreadPool(opt_threads)
+ # start multithreading
+ with tqdm(total=len(fp_ims)) as pbar:
+ results = pool.map(map_pool_resize, fp_ims)
+ # end multithreading
+ pbar.close()
+
+ log.info(f'Resized: {results.count(True)} / {len(fp_ims)} images') \ No newline at end of file
diff --git a/megapixels/commands/datasets/file_record.py b/megapixels/commands/datasets/file_record.py
index b5daef4e..41a5df28 100644
--- a/megapixels/commands/datasets/file_record.py
+++ b/megapixels/commands/datasets/file_record.py
@@ -127,7 +127,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
sha256 = file_utils.sha256(fp_im)
im = Image.open(fp_im)
im.verify() # throws error if bad file
- assert(im.size[0] > 100 and im.size[1] > 100)
+ assert(im.size[0] > 60 and im.size[1] > 60)
except Exception as e:
log.warn(f'skipping file: {fp_im}')
return None
diff --git a/megapixels/commands/demo/face_search.py b/megapixels/commands/demo/face_search.py
index d50f5c73..f551cafd 100644
--- a/megapixels/commands/demo/face_search.py
+++ b/megapixels/commands/demo/face_search.py
@@ -39,6 +39,7 @@ def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_results, opt_gpu):
import cv2 as cv
from tqdm import tqdm
import imutils
+ from PIL import Image, ImageOps
from app.utils import file_utils, im_utils, display_utils, draw_utils
from app.models.data_store import DataStore
@@ -49,9 +50,10 @@ def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_results, opt_gpu):
# init dataset
dataset = Dataset(opt_data_store, opt_dataset)
- dataset.load_face_vectors()
- dataset.load_records()
- # dataset.load_identities()
+ dataset.load_metadata(types.Metadata.FILE_RECORD)
+ dataset.load_metadata(types.Metadata.FACE_VECTOR)
+ dataset.load_metadata(types.Metadata.FACE_ROI)
+ # dataset.load_metadata(types.Metadata.IDENTITY)
# init face detection
detector = face_detector.DetectorCVDNN()
@@ -86,6 +88,9 @@ def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_results, opt_gpu):
image_record.summarize()
log.info(f'{image_record.filepath}')
im_match = cv.imread(image_record.filepath)
+
+ im_match_pil = Image.open(image_record.filepath).convert('RGB')
+ # bbox =
ims_match.append(im_match)
# make montages of most similar faces
diff --git a/megapixels/notebooks/face_analysis/face_recognition_vgg.ipynb b/megapixels/notebooks/face_analysis/face_recognition_vgg.ipynb
index 45e167b4..e9808232 100644
--- a/megapixels/notebooks/face_analysis/face_recognition_vgg.ipynb
+++ b/megapixels/notebooks/face_analysis/face_recognition_vgg.ipynb
@@ -372,10 +372,42 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 452,
"metadata": {},
"outputs": [],
- "source": []
+ "source": [
+ "a = [.1, .2, .3, 1.1]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 456,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from PIL import Image"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 464,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "(590, 760) (372, 480)\n"
+ ]
+ }
+ ],
+ "source": [
+ "fp_im_test = '/home/adam/Downloads/faces/snowden.jpg'\n",
+ "im_rs = Image.open(fp_im_test).convert('RGB')\n",
+ "im_rs_sm = im_rs.copy()\n",
+ "im_rs_sm.thumbnail((480,480))\n",
+ "print(im_rs.size, '', im_rs_sm.size)"
+ ]
},
{
"cell_type": "code",