author    adamhrv <adam@ahprojects.com>    2018-12-16 19:37:58 +0100
committer adamhrv <adam@ahprojects.com>    2018-12-16 19:37:58 +0100
commit    c3839ea797401d740db64691c0b4922c935b131c (patch)
tree      ef64b6b441dd677a41f79a423af8b7a44e68b23f /megapixels
parent    10f467b64e3be528ac246d5cf664d675aca3e7f3 (diff)
still sorting CSV vectors indexes
Diffstat (limited to 'megapixels')
-rw-r--r--  megapixels/app/models/data_store.py (renamed from megapixels/app/utils/path_utils.py)    3
-rw-r--r--  megapixels/app/models/dataset.py    41
-rw-r--r--  megapixels/app/settings/app_cfg.py    8
-rw-r--r--  megapixels/cli_demo.py    35
-rw-r--r--  megapixels/commands/cv/gen_face_vec.py (renamed from megapixels/commands/cv/rois_to_vecs.py)    56
-rw-r--r--  megapixels/commands/cv/gen_pose.py (renamed from megapixels/commands/cv/rois_to_pose.py)    72
-rw-r--r--  megapixels/commands/cv/gen_rois.py (renamed from megapixels/commands/cv/files_to_rois.py)    52
-rw-r--r--  megapixels/commands/datasets/add_uuid.py    44
-rw-r--r--  megapixels/commands/datasets/filter_by_pose.py    101
-rw-r--r--  megapixels/commands/datasets/filter_poses.py    76
-rw-r--r--  megapixels/commands/datasets/gen_filepath.py (renamed from megapixels/commands/datasets/file_meta.py)    50
-rw-r--r--  megapixels/commands/datasets/gen_sha256.py    152
-rw-r--r--  megapixels/commands/datasets/gen_uuid.py    65
-rw-r--r--  megapixels/commands/datasets/lookup.py    26
-rw-r--r--  megapixels/commands/datasets/sha256.py    89
-rw-r--r--  megapixels/commands/demo/face_analysis.py    56
-rw-r--r--  megapixels/commands/demo/face_search.py    95
17 files changed, 697 insertions, 324 deletions
diff --git a/megapixels/app/utils/path_utils.py b/megapixels/app/models/data_store.py
index b0262ea0..8ec1f8ba 100644
--- a/megapixels/app/utils/path_utils.py
+++ b/megapixels/app/models/data_store.py
@@ -21,6 +21,9 @@ class DataStore:
def metadata(self, enum_type):
return join(self.dir_metadata, f'{enum_type.name.lower()}.csv')
+ def media_images_original(self):
+ return join(self.dir_media, 'original')
+
def face_image(self, subdir, fn, ext):
return join(self.dir_media, 'original', subdir, f'{fn}.{ext}')
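
Note: with path_utils renamed into the DataStore model, callers build media paths through these helpers; a minimal usage sketch based on the signatures visible in this commit (the constructor arguments and example dataset name are assumptions):

    from app.models.data_store import DataStore
    from app.settings import types

    # DataStore(data_store_enum, dataset_enum), as used throughout this commit
    data_store = DataStore(types.DataStore.SSD, types.Dataset.EXAMPLE)  # dataset name hypothetical
    print(data_store.media_images_original())                  # <dir_media>/original
    print(data_store.face_image('n000001', '0001_01', 'jpg'))  # <dir_media>/original/n000001/0001_01.jpg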
diff --git a/megapixels/app/models/dataset.py b/megapixels/app/models/dataset.py
index 11d568a5..8fef8a7e 100644
--- a/megapixels/app/models/dataset.py
+++ b/megapixels/app/models/dataset.py
@@ -2,6 +2,7 @@
Dataset model: container for all CSVs about a dataset
"""
import os
+import sys
from os.path import join
from pathlib import Path
import logging
@@ -12,7 +13,8 @@ import numpy as np
from app.settings import app_cfg as cfg
from app.settings import types
from app.models.bbox import BBox
-from app.utils import file_utils, im_utils, path_utils
+from app.utils import file_utils, im_utils
+from app.models.data_store import DataStore, DataStoreS3
from app.utils.logger_utils import Logger
# -------------------------------------------------------------------------
@@ -21,17 +23,19 @@ from app.utils.logger_utils import Logger
class Dataset:
- def __init__(self, opt_dataset_type, opt_data_store=types.DataStore.NAS):
+ def __init__(self, opt_data_store, opt_dataset_type, load_files=True):
self._dataset_type = opt_dataset_type # enum type
self.log = Logger.getLogger()
self._metadata = {}
self._face_vectors = []
self._nullframe = pd.DataFrame() # empty placeholder
- self.data_store = path_utils.DataStore(opt_data_store, self._dataset_type)
- self.data_store_s3 = path_utils.DataStoreS3(self._dataset_type)
+ self.data_store = DataStore(opt_data_store, self._dataset_type)
+ self.data_store_s3 = DataStoreS3(self._dataset_type)
+ self.load_metadata()
- def load(self, opt_data_store):
+ def load_metadata(self):
'''Loads all CSV files into (dict) of DataFrames'''
+ self.log.info(f'creating dataset: {self._dataset_type}...')
for metadata_type in types.Metadata:
self.log.info(f'load metadata: {metadata_type}')
fp_csv = self.data_store.metadata(metadata_type)
@@ -40,11 +44,12 @@ class Dataset:
self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
if metadata_type == types.Metadata.FACE_VECTOR:
# convert DataFrame to list of floats
- self._face_vecs = self.df_to_vec_list(self._metadata[metadata_type])
+ self._face_vectors = self.df_to_vec_list(self._metadata[metadata_type])
+ self.log.info(f'built face vector list: {len(self._face_vectors)}')
self._metadata[metadata_type].drop('vec', axis=1, inplace=True)
else:
- self.log.error('File not found: {fp_csv}. Replaced with empty DataFrame')
- self._metadata[metadata_type] = self._nullframe
+ self.log.error(f'File not found: {fp_csv}. Exiting.')
+ sys.exit()
self.log.info('finished loading')
def metadata(self, opt_metadata_type):
@@ -80,7 +85,7 @@ class Dataset:
image_record = ImageRecord(image_index, sha256, uuid, bbox, fp_im, fp_url)
# now get the identity index (if available)
identity_index = ds_sha256.identity_index
- if identity_index:
+ if identity_index > -1:
# then use the identity index to get the identity meta
df_identity = self._metadata[types.Metadata.IDENTITY]
ds_identity = df_identity.iloc[identity_index]
@@ -95,18 +100,24 @@ class Dataset:
identity = Identity(identity_index, name=name, desc=desc, gender=gender, n_images=n_images,
url=url, age=age, nationality=nationality)
image_record.identity = identity
+ else:
+ self.log.info(f'no identity index: {ds_sha256}')
return image_record
- def matches(self, query_vec, n_results=5, threshold=0.5):
+ def find_matches(self, query_vec, n_results=5, threshold=0.6):
image_records = [] # list of image matches w/identity if available
# find most similar feature vectors indexes
- match_idxs = self.similar(query_vec, n_results, threshold)
+ #match_idxs = self.similar(query_vec, n_results, threshold)
+ sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
+ match_idxs = np.argpartition(sim_scores, n_results)[:n_results]
+
for match_idx in match_idxs:
# get the corresponding face vector row
+ self.log.debug(f'find match index: {match_idx}')
image_record = self.roi_idx_to_record(match_idx)
- results.append(image_record)
+ image_records.append(image_record)
return image_records
# ----------------------------------------------------------------------
@@ -114,8 +125,7 @@ class Dataset:
def df_to_vec_list(self, df):
# convert the DataFrame CSV to float list of vecs
- vecs = [list(map(float,x.vec.split(','))) for x in df.itertuples()]
- return vecs
+ return [list(map(float,x.vec.split(','))) for x in df.itertuples()]
def similar(self, query_vec, n_results):
'''Finds most similar N indices of query face vector
@@ -124,8 +134,7 @@ class Dataset:
:returns (list) of (int) indices
'''
# uses np.linalg based on the ageitgey/face_recognition code
vecs_sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
top_idxs = np.argpartition(vecs_sim_scores, n_results)[:n_results]
return top_idxs
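
Note: find_matches() now ranks every stored face vector by L2 distance to the query and keeps the n_results smallest via np.argpartition. A self-contained sketch of the same lookup (random vectors stand in for the CSV-loaded ones; dlib face vectors are 128-d):

    import numpy as np

    rng = np.random.default_rng(0)
    face_vectors = rng.normal(size=(1000, 128))  # stands in for self._face_vectors
    query_vec = rng.normal(size=128)

    # L2 distance from the query to every stored vector
    sim_scores = np.linalg.norm(face_vectors - query_vec, axis=1)
    # argpartition returns the 5 smallest unordered; sort them for ranked output
    n_results = 5
    match_idxs = np.argpartition(sim_scores, n_results)[:n_results]
    match_idxs = match_idxs[np.argsort(sim_scores[match_idxs])]
    print(match_idxs, sim_scores[match_idxs])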
diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py
index 50eaf576..7f9ed187 100644
--- a/megapixels/app/settings/app_cfg.py
+++ b/megapixels/app/settings/app_cfg.py
@@ -75,6 +75,7 @@ DIR_COMMANDS_DATASETS = 'commands/datasets'
DIR_COMMANDS_FAISS = 'commands/faiss'
DIR_COMMANDS_MISC = 'commands/misc'
DIR_COMMANDS_SITE = 'commands/site'
+DIR_COMMANDS_DEMO = 'commands/demo'
# -----------------------------------------------------------------------------
# Filesystem settings
@@ -89,6 +90,13 @@ HASH_BRANCH_SIZE = 3
DLIB_FACEREC_JITTERS = 5 # number of face recognition jitters
DLIB_FACEREC_PADDING = 0.25 # default dlib
+# strict pose limits (superseded by the wider limits below)
+#POSE_MINMAX_YAW = (-25,25)
+#POSE_MINMAX_ROLL = (-15,15)
+#POSE_MINMAX_PITCH = (-10,10)
+
+POSE_MINMAX_YAW = (-40,40)
+POSE_MINMAX_ROLL = (-35,35)
+POSE_MINMAX_PITCH = (-25,25)
# -----------------------------------------------------------------------------
# Logging options exposed for custom click Params
# -----------------------------------------------------------------------------
diff --git a/megapixels/cli_demo.py b/megapixels/cli_demo.py
new file mode 100644
index 00000000..703db856
--- /dev/null
+++ b/megapixels/cli_demo.py
@@ -0,0 +1,35 @@
+# --------------------------------------------------------
+# add/edit commands in commands/datasets directory
+# --------------------------------------------------------
+
+import click
+
+from app.settings import app_cfg as cfg
+from app.utils import logger_utils
+from app.models.click_factory import ClickSimple
+
+# click cli factory
+cc = ClickSimple.create(cfg.DIR_COMMANDS_DEMO)
+
+# --------------------------------------------------------
+# CLI
+# --------------------------------------------------------
+@click.group(cls=cc, chain=False)
+@click.option('-v', '--verbose', 'verbosity', count=True, default=4,
+ show_default=True,
+ help='Verbosity: -v DEBUG, -vv INFO, -vvv WARN, -vvvv ERROR, -vvvvv CRITICAL')
+@click.pass_context
+def cli(ctx, **kwargs):
+ """\033[1m\033[94mMegaPixels: Dataset Image Scripts\033[0m
+ """
+ ctx.opts = {}
+ # init logger
+ logger_utils.Logger.create(verbosity=kwargs['verbosity'])
+
+
+# --------------------------------------------------------
+# Entrypoint
+# --------------------------------------------------------
+if __name__ == '__main__':
+ cli()
+
diff --git a/megapixels/commands/cv/rois_to_vecs.py b/megapixels/commands/cv/gen_face_vec.py
index 525f4404..83e1460d 100644
--- a/megapixels/commands/cv/rois_to_vecs.py
+++ b/megapixels/commands/cv/gen_face_vec.py
@@ -9,14 +9,20 @@ from app.utils import click_utils
from app.settings import app_cfg as cfg
@click.command()
-@click.option('-i', '--input', 'opt_fp_files', required=True,
- help='Input file meta CSV')
-@click.option('-r', '--rois', 'opt_fp_rois', required=True,
- help='Input ROI CSV')
-@click.option('-m', '--media', 'opt_dir_media', required=True,
- help='Input media directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
@click.option('--size', 'opt_size',
type=(int, int), default=(300, 300),
help='Output image size')
@@ -31,7 +37,7 @@ from app.settings import app_cfg as cfg
@click.option('-g', '--gpu', 'opt_gpu', default=0,
help='GPU index')
@click.pass_context
-def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
+def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding):
"""Converts face ROIs to vectors"""
@@ -48,6 +54,7 @@ def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
import pandas as pd
from app.models.bbox import BBox
+ from app.models.data_store import DataStore
from app.utils import logger_utils, file_utils, im_utils
from app.processors import face_recognition
@@ -56,24 +63,28 @@ def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
# init here
log = logger_utils.Logger.getLogger()
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FACE_VECTOR) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwrite')
+ return
# init face processors
facerec = face_recognition.RecognitionDLIB()
# load data
- df_file_meta = pd.read_csv(opt_fp_files)
- df_rois = pd.read_csv(opt_fp_rois)
+ df_file = pd.read_csv(data_store.metadata(types.Metadata.FILEPATH)).set_index('index')
+ df_roi = pd.read_csv(data_store.metadata(types.Metadata.FACE_ROI)).set_index('index')
- if not opt_force and Path(opt_fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
-
if opt_slice:
- df_rois = df_rois[opt_slice[0]:opt_slice[1]]
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
# -------------------------------------------------
# process here
- df_img_groups = df_rois.groupby('image_index')
+ df_img_groups = df_roi.groupby('image_index')
log.debug('processing {:,} groups'.format(len(df_img_groups)))
vecs = []
@@ -81,8 +92,11 @@ def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
for image_index, df_img_group in tqdm(df_img_groups):
# make fp
roi_index = df_img_group.index.values[0]
- file_meta = df_file_meta.iloc[image_index] # locate image meta
- fp_im = join(opt_dir_media, file_meta.subdir, '{}.{}'.format(file_meta.fn, file_meta.ext))
+ log.debug(f'roi_index: {roi_index}, image_index: {image_index}')
+ ds_file = df_file.loc[roi_index] # locate image meta
+ #ds_file = df_file.loc['index', image_index] # locate image meta
+
+ fp_im = data_store.face_image(str(ds_file.subdir), str(ds_file.fn), str(ds_file.ext))
im = cv.imread(fp_im)
# get bbox
x = df_img_group.x.values[0]
@@ -103,7 +117,7 @@ def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
# save data
- file_utils.mkdirs(opt_fp_out)
df = pd.DataFrame.from_dict(vecs)
df.index.name = 'index'
- df.to_csv(opt_fp_out) \ No newline at end of file
+ #file_utils.mkdirs(fp_out)
+ #df.to_csv(fp_out) \ No newline at end of file
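
Note: the FACE_VECTOR CSV stores each vector as a single comma-joined string in a 'vec' column, which Dataset.df_to_vec_list() parses back to floats; a round-trip sketch of that encoding:

    import pandas as pd

    vec = [0.12, -0.05, 0.33]
    df = pd.DataFrame([{'image_index': 0, 'vec': ','.join(map(str, vec))}])
    df.index.name = 'index'
    # same parse as df_to_vec_list()
    parsed = [list(map(float, x.vec.split(','))) for x in df.itertuples()]
    assert parsed[0] == vec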
diff --git a/megapixels/commands/cv/rois_to_pose.py b/megapixels/commands/cv/gen_pose.py
index 3877cecf..aefadb00 100644
--- a/megapixels/commands/cv/rois_to_pose.py
+++ b/megapixels/commands/cv/gen_pose.py
@@ -9,14 +9,22 @@ from app.utils import click_utils
from app.settings import app_cfg as cfg
@click.command()
-@click.option('-i', '--input', 'opt_fp_files', required=True,
- help='Input ROI CSV')
-@click.option('-r', '--rois', 'opt_fp_rois', required=True,
- help='Input ROI CSV')
-@click.option('-m', '--media', 'opt_dir_media', required=True,
- help='Input media directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output CSV')
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
@click.option('--size', 'opt_size',
type=(int, int), default=(300, 300),
help='Output image size')
@@ -27,7 +35,7 @@ from app.settings import app_cfg as cfg
@click.option('-d', '--display', 'opt_display', is_flag=True,
help='Display image for debugging')
@click.pass_context
-def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
opt_slice, opt_force, opt_display):
"""Converts ROIs to pose: roll, yaw, pitch"""
@@ -47,42 +55,48 @@ def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
from app.utils import logger_utils, file_utils, im_utils
from app.processors.face_landmarks import LandmarksDLIB
from app.processors.face_pose import FacePoseDLIB
+ from app.models.data_store import DataStore
# -------------------------------------------------
# init here
log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FACE_POSE) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwrite')
+ return
# init face processors
face_pose = FacePoseDLIB()
face_landmarks = LandmarksDLIB()
- # load datra
- df_files = pd.read_csv(opt_fp_files)
- df_rois = pd.read_csv(opt_fp_rois)
-
- if not opt_force and Path(opt_fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
-
+ # load filepath data
+ fp_filepath = data_store.metadata(types.Metadata.FILEPATH)
+ df_filepath = pd.read_csv(fp_filepath)
+ # load ROI data
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi)
+ # slice if you want
if opt_slice:
- df_rois = df_rois[opt_slice[0]:opt_slice[1]]
-
- # -------------------------------------------------
- # process here
- df_img_groups = df_rois.groupby('image_index')
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+ # group by image index (speedup if multiple faces per image)
+ df_img_groups = df_roi.groupby('image_index')
log.debug('processing {:,} groups'.format(len(df_img_groups)))
-
+ # store poses and convert to DataFrame
poses = []
# iterate
- #for df_roi_group_idx, df_roi_group in tqdm(df_roi_groups):
for image_index, df_img_group in tqdm(df_img_groups):
# make fp
- #image_index = df_roi_group.image_index.values[0]
- pds_file = df_files.iloc[image_index]
- fp_im = join(opt_dir_media, pds_file.subdir, '{}.{}'.format(pds_file.fn, pds_file.ext))
+ ds_file = df_filepath.iloc[image_index]
+ fp_im = data_store.face_image(ds_file.subdir, ds_file.fn, ds_file.ext)
+ #fp_im = join(opt_dir_media, ds_file.subdir, '{}.{}'.format(ds_file.fn, ds_file.ext))
im = cv.imread(fp_im)
# get bbox
x = df_img_group.x.values[0]
@@ -121,7 +135,7 @@ def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
# save data
- file_utils.mkdirs(opt_fp_out)
+ file_utils.mkdirs(fp_out)
df = pd.DataFrame.from_dict(poses)
df.index.name = 'index'
- df.to_csv(opt_fp_out) \ No newline at end of file
+ df.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/cv/files_to_rois.py b/megapixels/commands/cv/gen_rois.py
index 1aaf991c..20dd598a 100644
--- a/megapixels/commands/cv/files_to_rois.py
+++ b/megapixels/commands/cv/gen_rois.py
@@ -12,12 +12,22 @@ from app.settings import app_cfg as cfg
color_filters = {'color': 1, 'gray': 2, 'all': 3}
@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input CSV (eg image_files.csv)')
-@click.option('-m', '--media', 'opt_dir_media', required=True,
- help='Input media directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output CSV')
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
@click.option('--size', 'opt_size',
type=(int, int), default=(300, 300),
help='Output image size')
@@ -40,10 +50,10 @@ color_filters = {'color': 1, 'gray': 2, 'all': 3}
@click.option('--color', 'opt_color_filter',
type=click.Choice(color_filters.keys()), default='all',
help='Filter to keep color or grayscale images (color = keep color)')
-@click.option('--largest', 'opt_largest', is_flag=True,
+@click.option('--largest/--all-faces', 'opt_largest', is_flag=True, default=True,
help='Only keep largest face')
@click.pass_context
-def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_size, opt_detector_type,
+def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset, opt_size, opt_detector_type,
opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_force, opt_color_filter,
opt_largest):
"""Converts frames with faces to CSV of ROIs"""
@@ -61,17 +71,24 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_size, opt_detector_type,
import pandas as pd
from app.utils import logger_utils, file_utils, im_utils
- from app.processors import face_detector
+ from app.processors import face_detector
+ from app.models.data_store import DataStore
# -------------------------------------------------
# init here
log = logger_utils.Logger.getLogger()
- if not opt_force and Path(opt_fp_out).exists():
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FACE_ROI) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
log.error('File exists. Use "-f / --force" to overwrite')
return
+ # set detector
if opt_detector_type == types.FaceDetectNet.CVDNN:
detector = face_detector.DetectorCVDNN()
elif opt_detector_type == types.FaceDetectNet.DLIB_CNN:
@@ -85,22 +102,21 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_size, opt_detector_type,
return
- # -------------------------------------------------
- # process here
- color_filter = color_filters[opt_color_filter]
-
# get list of files to process
- df_files = pd.read_csv(opt_fp_in).set_index('index')
-
+ fp_in = data_store.metadata(types.Metadata.FILEPATH) if opt_fp_in is None else opt_fp_in
+ df_files = pd.read_csv(fp_in).set_index('index')
if opt_slice:
df_files = df_files[opt_slice[0]:opt_slice[1]]
log.debug('processing {:,} files'.format(len(df_files)))
+ # filter out grayscale
+ color_filter = color_filters[opt_color_filter]
data = []
for df_file in tqdm(df_files.itertuples(), total=len(df_files)):
- fp_im = join(opt_dir_media, str(df_file.subdir), f'{df_file.fn}.{df_file.ext}')
+ fp_im = data_store.face_image(str(df_file.subdir), str(df_file.fn), str(df_file.ext))
+ #fp_im = join(opt_dir_media, str(df_file.subdir), f'{df_file.fn}.{df_file.ext}')
im = cv.imread(fp_im)
# filter out color or grayscale images
@@ -150,7 +166,7 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_size, opt_detector_type,
break
# save data
- file_utils.mkdirs(opt_fp_out)
+ file_utils.mkdirs(fp_out)
df = pd.DataFrame.from_dict(data)
df.index.name = 'index'
- df.to_csv(opt_fp_out) \ No newline at end of file
+ df.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/datasets/add_uuid.py b/megapixels/commands/datasets/add_uuid.py
deleted file mode 100644
index 9c14c0e3..00000000
--- a/megapixels/commands/datasets/add_uuid.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-from app.utils.logger_utils import Logger
-
-log = Logger.getLogger()
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input directory')
-@click.option('-o', '--output', 'opt_fp_out',
- help='Output directory')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_force):
- """Appends UUID to records CSV"""
-
- from glob import glob
- from os.path import join
- from pathlib import Path
- import base64
- import uuid
-
- from tqdm import tqdm
- import pandas as pd
-
- if not opt_force and Path(opt_fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
-
- # load names
- df_records = pd.read_csv(opt_fp_in)
- records = df_records.to_dict('index')
- # append a UUID to every entry
- for idx, item in records.items():
- records[idx]['uuid'] = uuid.uuid4()
- # save to csv
- df_uuid = pd.DataFrame.from_dict(list(records.values())) # ignore the indices
- df_uuid.to_csv(opt_fp_out, index=False)
-
- log.info('done') \ No newline at end of file
diff --git a/megapixels/commands/datasets/filter_by_pose.py b/megapixels/commands/datasets/filter_by_pose.py
new file mode 100644
index 00000000..6fdbef98
--- /dev/null
+++ b/megapixels/commands/datasets/filter_by_pose.py
@@ -0,0 +1,101 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--yaw', 'opt_yaw', type=(float, float), default=cfg.POSE_MINMAX_YAW,
+ help='Yaw (min, max)')
+@click.option('--roll', 'opt_roll', type=(float, float), default=cfg.POSE_MINMAX_ROLL,
+ help='Roll (min, max)')
+@click.option('--pitch', 'opt_pitch', type=(float, float), default=cfg.POSE_MINMAX_PITCH,
+ help='Pitch (min, max)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_yaw, opt_roll, opt_pitch):
+ """Filter out exaggerated poses"""
+
+ import sys
+ from os.path import join
+ from pathlib import Path
+ import shutil
+ from datetime import datetime
+
+ import pandas as pd
+ from tqdm import tqdm
+
+ from app.models.data_store import DataStore
+ from app.utils import file_utils
+
+ # create date store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # load pose
+ fp_pose = data_store.metadata(types.Metadata.FACE_POSE)
+ df_pose = pd.read_csv(fp_pose).set_index('index')
+ # load roi
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # load filepath
+ fp_filepath = data_store.metadata(types.Metadata.FILEPATH)
+ df_filepath = pd.read_csv(fp_filepath).set_index('index')
+ # load uuid
+ fp_uuid= data_store.metadata(types.Metadata.UUID)
+ df_uuid = pd.read_csv(fp_uuid).set_index('index')
+ # load sha256 index
+ fp_sha256 = data_store.metadata(types.Metadata.SHA256)
+ df_sha256 = pd.read_csv(fp_sha256).set_index('index')
+ # debug
+ log.info('Processing {:,} rows'.format(len(df_pose)))
+ n_rows = len(df_pose)
+
+ # filter out extreme poses
+ invalid_indices = []
+ for ds_pose in tqdm(df_pose.itertuples(), total=len(df_pose)):
+ # invalid if any of yaw / roll / pitch falls outside its (min, max) range
+ if ds_pose.yaw < opt_yaw[0] or ds_pose.yaw > opt_yaw[1] \
+ or ds_pose.roll < opt_roll[0] or ds_pose.roll > opt_roll[1] \
+ or ds_pose.pitch < opt_pitch[0] or ds_pose.pitch > opt_pitch[1]:
+ invalid_indices.append(ds_pose.Index) # unique file indexes
+
+ # filter out valid/invalid
+ log.info(invalid_indices[:20])
+ log.info(f'Removing {len(invalid_indices)} invalid indices...')
+ df_filepath = df_filepath.drop(df_pose.index[invalid_indices])
+ df_sha256 = df_sha256.drop(df_pose.index[invalid_indices])
+ df_uuid = df_uuid.drop(df_pose.index[invalid_indices])
+ df_roi = df_roi.drop(df_pose.index[invalid_indices])
+ df_pose = df_pose.drop(df_pose.index[invalid_indices])
+ log.info(f'Removed {n_rows - len(df_pose)}')
+
+ # move file to make backup
+ dir_bkup = join(Path(fp_pose).parent, f'backup_{datetime.now():%Y%m%d_%H%M%S}')
+ file_utils.mkdirs(dir_bkup)
+ # move files to backup
+ shutil.move(fp_filepath, join(dir_bkup, Path(fp_filepath).name))
+ shutil.move(fp_sha256, join(dir_bkup, Path(fp_sha256).name))
+ shutil.move(fp_uuid, join(dir_bkup, Path(fp_uuid).name))
+ shutil.move(fp_roi, join(dir_bkup, Path(fp_roi).name))
+ shutil.move(fp_pose, join(dir_bkup, Path(fp_pose).name))
+ # save filtered poses
+ df_filepath.to_csv(fp_filepath)
+ df_sha256.to_csv(fp_sha256)
+ df_uuid.to_csv(fp_uuid)
+ df_roi.to_csv(fp_roi)
+ df_pose.to_csv(fp_pose)
+
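
Note: the row loop above can also be expressed as one vectorized pandas mask (same rule: a face is invalid if any of yaw/roll/pitch falls outside its (min, max) range); a sketch assuming the df_pose loaded above:

    def out_of_range(s, lo, hi):
        return (s < lo) | (s > hi)

    mask_invalid = (out_of_range(df_pose.yaw, *opt_yaw)
                    | out_of_range(df_pose.roll, *opt_roll)
                    | out_of_range(df_pose.pitch, *opt_pitch))
    invalid_indices = df_pose.index[mask_invalid].tolist()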
diff --git a/megapixels/commands/datasets/filter_poses.py b/megapixels/commands/datasets/filter_poses.py
deleted file mode 100644
index 304eeff2..00000000
--- a/megapixels/commands/datasets/filter_poses.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-from app.utils.logger_utils import Logger
-
-log = Logger.getLogger()
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output directory')
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
- help='Slice list of files')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.option('--yaw', 'opt_yaw', type=(float, float), default=(-25,25),
- help='Yaw (min, max)')
-@click.option('--roll', 'opt_roll', type=(float, float), default=(-15,15),
- help='Roll (min, max)')
-@click.option('--pitch', 'opt_pitch', type=(float, float), default=(-10,10),
- help='Pitch (min, max)')
-@click.option('--drop', 'opt_drop', type=click.Choice(['valid', 'invalid']), default='invalid',
- help='Drop valid or invalid poses')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_yaw, opt_roll, opt_pitch,
- opt_drop, opt_force):
- """Filter out exaggerated poses"""
-
- from glob import glob
- from os.path import join
- from pathlib import Path
- import time
- from multiprocessing.dummy import Pool as ThreadPool
- import random
-
- import pandas as pd
- from tqdm import tqdm
- from glob import glob
-
- from app.utils import file_utils, im_utils
-
-
- if not opt_force and Path(opt_fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
-
- df_poses = pd.read_csv(opt_fp_in).set_index('index')
-
- if opt_slice:
- df_poses = df_poses[opt_slice[0]:opt_slice[1]]
-
- log.info('Processing {:,} rows'.format(len(df_poses)))
-
- # extend a new temporary column
- df_poses['valid'] = [0] * len(df_poses)
-
- # filter out extreme poses
- for ds_pose in tqdm(df_poses.itertuples(), total=len(df_poses)):
- if ds_pose.yaw > opt_yaw[0] and ds_pose.yaw < opt_yaw[1] \
- and ds_pose.roll > opt_roll[0] and ds_pose.roll < opt_roll[1] \
- and ds_pose.pitch > opt_pitch[0] and ds_pose.pitch < opt_pitch[1]:
- df_poses.at[ds_pose.Index, 'valid'] = 1
-
- # filter out valid/invalid
- drop_val = 0 if opt_drop == 'valid' else 0 # drop 0's if drop == valid, else drop 1's
- df_poses_filtered = df_poses.drop(df_poses[df_poses.valid == int()].index, axis=0)
-
- # drop temp column
- df_poses_filtered = df_poses_filtered.drop('valid', axis=1)
-
- # save filtered poses
- df_poses_filtered.to_csv(opt_fp_out)
- log.info('Saved {:,} rows'.format(len(df_poses_filtered))) \ No newline at end of file
diff --git a/megapixels/commands/datasets/file_meta.py b/megapixels/commands/datasets/gen_filepath.py
index e1456f44..e06fee6b 100644
--- a/megapixels/commands/datasets/file_meta.py
+++ b/megapixels/commands/datasets/gen_filepath.py
@@ -12,10 +12,20 @@ from app.utils.logger_utils import Logger
log = Logger.getLogger()
@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output file for file meta CSV')
+@click.option('-i', '--input', 'opt_fp_in',
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
help='Slice list of files')
@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
@@ -25,7 +35,8 @@ log = Logger.getLogger()
@click.option('-f', '--force', 'opt_force', is_flag=True,
help='Force overwrite file')
@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_recursive, opt_threads, opt_force):
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_slice,
+ opt_recursive, opt_threads, opt_force):
"""Multithreading test"""
from glob import glob
@@ -39,21 +50,26 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_recursive, opt_threads, opt_f
from tqdm import tqdm
from glob import glob
+ from app.models import DataStore
from app.utils import file_utils, im_utils
-
- if not opt_force and Path(opt_fp_out).exists():
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_out = opt_fp_out if opt_fp_out is not None else data_store.metadata(types.Metadata.FILEPATH)
+ if not opt_force and Path(fp_out).exists():
log.error('File exists. Use "-f / --force" to overwrite')
return
+
+ # glob files
+ fp_in = opt_fp_in if opt_fp_in is not None else data_store.media_images_original()
fp_ims = []
- log.info(f'Globbing {opt_fp_in}')
+ log.info(f'Globbing {fp_in}')
for ext in ['jpg', 'png']:
if opt_recursive:
- fp_glob = join(opt_fp_in, '**/*.{}'.format(ext))
+ fp_glob = join(fp_in, '**/*.{}'.format(ext))
fp_ims += glob(fp_glob, recursive=True)
else:
- fp_glob = join(opt_fp_in, '*.{}'.format(ext))
+ fp_glob = join(fp_in, '*.{}'.format(ext))
fp_ims += glob(fp_glob)
if not fp_ims:
@@ -63,14 +79,14 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_recursive, opt_threads, opt_f
if opt_slice:
fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
- log.info('Processing {:,} images'.format(len(fp_ims)))
+ log.info('Found {:,} images'.format(len(fp_ims)))
# convert data to dict
data = []
for i, fp_im in enumerate(tqdm(fp_ims)):
fpp_im = Path(fp_im)
- subdir = str(fpp_im.parent.relative_to(opt_fp_in))
+ subdir = str(fpp_im.parent.relative_to(fp_in))
data.append( {
'subdir': subdir,
'fn': fpp_im.stem,
@@ -78,7 +94,9 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_recursive, opt_threads, opt_f
})
# save to CSV
- file_utils.mkdirs(opt_fp_out)
- df = pd.DataFrame.from_dict(data)
- df.index.name = 'index'
- df.to_csv(opt_fp_out) \ No newline at end of file
+ file_utils.mkdirs(fp_out)
+ df_filepath = pd.DataFrame.from_dict(data)
+ df_filepath = df_filepath.sort_values(by=['subdir'], ascending=True)
+ df_filepath = df_filepath.reset_index(drop=True)
+ df_filepath.index.name = 'index'
+ df_filepath.to_csv(fp_out) \ No newline at end of file
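
Note: gen_filepath now sorts by subdir and resets the index before writing, so the 'index' column is a stable 0..n-1 ordering that the other metadata CSVs can reference; a small sketch of that normalization:

    import pandas as pd

    df = pd.DataFrame({'subdir': ['n002', 'n001'], 'fn': ['b', 'a'], 'ext': ['jpg', 'jpg']})
    df = df.sort_values(by=['subdir'], ascending=True).reset_index(drop=True)
    df.index.name = 'index'
    # index 0 -> n001/a.jpg, index 1 -> n002/b.jpg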
diff --git a/megapixels/commands/datasets/gen_sha256.py b/megapixels/commands/datasets/gen_sha256.py
new file mode 100644
index 00000000..1616eebf
--- /dev/null
+++ b/megapixels/commands/datasets/gen_sha256.py
@@ -0,0 +1,152 @@
+'''
+Generates the sha256/identity index metadata CSV for a dataset
+'''
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+identity_sources = ['subdir', 'subdir_head', 'subdir_tail']
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-t', '--threads', 'opt_threads', default=12,
+ help='Number of threads')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('--identity', 'opt_identity', default='subdir_tail', type=click.Choice(identity_sources),
+ help='Identity source, blank for no identity')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media, opt_slice, opt_threads,
+ opt_identity, opt_force):
+ """Generates sha256/identity index CSV file"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+ from multiprocessing.dummy import Pool as ThreadPool
+ import random
+
+ import pandas as pd
+ from tqdm import tqdm
+ from glob import glob
+
+ from app.models import DataStore
+ from app.utils import file_utils, im_utils
+
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.SHA256) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwrite')
+ return
+ # get filepath in
+ fp_in = data_store.metadata(types.Metadata.FILEPATH) if opt_fp_in is None else opt_fp_in
+ df_files = pd.read_csv(fp_in).set_index('index')
+ # slice if you want
+ if opt_slice:
+ df_files = df_files[opt_slice[0]:opt_slice[1]]
+
+ log.info('Processing {:,} images'.format(len(df_files)))
+
+
+ # prepare list of images to multithread into sha256s
+ dir_media = data_store.media_images_original() if opt_dir_media is None else opt_dir_media
+ file_objs = []
+ for ds_file in df_files.itertuples():
+ fp_im = join(dir_media, str(ds_file.subdir), f"{ds_file.fn}.{ds_file.ext}")
+ # find the image_index
+ # append the subdir option, sort by this then increment by unique subdir
+ file_obj = {'fp': fp_im, 'index': ds_file.Index}
+ if opt_identity:
+ subdirs = ds_file.subdir.split('/')
+ if not len(subdirs) > 0:
+ log.error(f'Could not split subdir: "{ds_file.subdir}". Try a different option for "--identity"')
+ log.error('exiting')
+ return
+ if opt_identity in ('subdir', 'subdir_head'):
+ # use first part of subdir path
+ subdir = subdirs[0]
+ elif opt_identity == 'subdir_tail':
+ # use last part of subdir path
+ subdir = subdirs[-1]
+ file_obj['identity_subdir'] = subdir
+ file_objs.append(file_obj)
+
+ # thread-pool the sha256 hashing; pbar is bound by the with-block below
+
+ def as_sha256(file_obj):
+ pbar.update(1)
+ file_obj['sha256'] = file_utils.sha256(file_obj['fp'])
+ return file_obj
+
+ # multithread pool
+ pool_file_objs = []
+ st = time.time()
+ pool = ThreadPool(opt_threads)
+ with tqdm(total=len(file_objs)) as pbar:
+ pool_file_objs = pool.map(as_sha256, file_objs)
+ pbar.close()
+
+ # convert data to dict
+ data = []
+ for pool_file_obj in pool_file_objs:
+ data.append( {
+ 'sha256': pool_file_obj['sha256'],
+ 'index': pool_file_obj['index'],
+ 'identity_subdir': pool_file_obj.get('identity_subdir', ''),
+ })
+
+ # sort based on identity_subdir
+ # save to CSV
+ df_sha256 = pd.DataFrame.from_dict(data)
+ # add new column for identity
+ # default to -1 (no identity); overwritten below for every grouped row
+ df_sha256['identity_index'] = [-1] * len(df_sha256)
+ df_sha256 = df_sha256.sort_values(by=['identity_subdir'], ascending=True)
+ df_sha256_identity_groups = df_sha256.groupby('identity_subdir')
+ for identity_index, df_sha256_identity_group_tuple in enumerate(df_sha256_identity_groups):
+ identity_subdir, df_sha256_identity_group = df_sha256_identity_group_tuple
+ for ds_sha256 in df_sha256_identity_group.itertuples():
+ df_sha256.at[ds_sha256.Index, 'identity_index'] = identity_index
+ # drop temp identity subdir column
+ df_sha256 = df_sha256.drop('identity_subdir', axis=1)
+ # write to CSV
+ log.info(f'rows: {len(df_sha256)}')
+ file_utils.mkdirs(fp_out)
+ df_sha256 = df_sha256.sort_values(['index'], ascending=[True])
+ df_sha256.to_csv(fp_out, index=False)
+
+ # timing
+ log.info(f'wrote file: {fp_out}')
+ log.info('time: {:.2f}, threads: {}'.format(time.time() - st, opt_threads))
+ \ No newline at end of file
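
Note: the groupby/enumerate pass above assigns one integer per unique identity_subdir; pandas can derive the same mapping in a single call (a sketch, not the commit's code):

    import pandas as pd

    df = pd.DataFrame({'identity_subdir': ['n001', 'n000', 'n001']})
    df = df.sort_values('identity_subdir')
    # factorize numbers the values 0..k-1 in order of first appearance,
    # which after the sort matches the ascending subdir order used above
    df['identity_index'] = pd.factorize(df.identity_subdir)[0]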
diff --git a/megapixels/commands/datasets/gen_uuid.py b/megapixels/commands/datasets/gen_uuid.py
new file mode 100644
index 00000000..612c43ee
--- /dev/null
+++ b/megapixels/commands/datasets/gen_uuid.py
@@ -0,0 +1,65 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_force):
+ """Appends UUID to records CSV"""
+
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import base64
+ import uuid
+
+ from tqdm import tqdm
+ import pandas as pd
+
+ from app.models import DataStore
+
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.UUID) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # load sha256 records
+ fp_in = data_store.metadata(types.Metadata.SHA256) if opt_fp_in is None else opt_fp_in
+ log.info(f'Loading: {fp_in}')
+ df_records = pd.read_csv(fp_in).set_index('index')
+
+ df_uuids = df_records.copy()
+ # one fresh UUID per row (a broadcast of a single uuid4() would duplicate it)
+ df_uuids['uuid'] = [uuid.uuid4() for _ in range(len(df_uuids))]
+
+ df_uuids = df_uuids.drop(['sha256', 'identity_index'], axis=1)
+ df_uuids.to_csv(fp_out) \ No newline at end of file
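
Note: base64 is imported above but unused; if the eventual intent is shorter URL-safe ids, a uuid4 can be packed like this (an assumption, not the commit's method):

    import base64
    import uuid

    u = uuid.uuid4()
    short_id = base64.urlsafe_b64encode(u.bytes).rstrip(b'=').decode('ascii')
    print(short_id)  # 22 URL-safe chars instead of the 36-char canonical form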
diff --git a/megapixels/commands/datasets/lookup.py b/megapixels/commands/datasets/lookup.py
index e84bdf3e..5a2a171e 100644
--- a/megapixels/commands/datasets/lookup.py
+++ b/megapixels/commands/datasets/lookup.py
@@ -6,8 +6,10 @@ from app.utils import click_utils
from app.settings import app_cfg as cfg
from app.utils.logger_utils import Logger
+log = Logger.getLogger()
+
@click.command()
-@click.option('--index', 'opt_index', type=int,
+@click.option('--index', 'opt_index', type=int, required=True,
help='Vector index to lookup')
@click.option('--data_store', 'opt_data_store',
type=cfg.DataStoreVar,
@@ -19,12 +21,8 @@ from app.utils.logger_utils import Logger
required=True,
show_default=True,
help=click_utils.show_help(types.Dataset))
-@click.option('--metadata', 'opt_metadata_type', required=True,
- type=cfg.MetadataVar,
- show_default=True,
- help=click_utils.show_help(types.Metadata))
@click.pass_context
-def cli(ctx, opt_index, opt_data_store, opt_dataset, opt_metadata_type):
+def cli(ctx, opt_index, opt_data_store, opt_dataset):
"""Display image info"""
import sys
@@ -37,22 +35,20 @@ def cli(ctx, opt_index, opt_data_store, opt_dataset, opt_metadata_type):
import cv2 as cv
from tqdm import tqdm
- from app.utils import file_utils, im_utils, path_utils
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
log = Logger.getLogger()
-
- log.info(f'creating dataset: {opt_dataset}')
- dataset = Dataset(opt_dataset)
- # loads all CSV files, may take a while
- log.info(f'loading dataset...')
- dataset.load(opt_data_store)
+ # init dataset
+ dataset = Dataset(opt_data_store, opt_dataset)
+ # metadata CSVs are loaded by Dataset.__init__ via load_metadata()
# find image records
image_record = dataset.roi_idx_to_record(opt_index)
# debug
image_record.summarize()
# load image
- fp_im = image_record.filepath
- im = cv.imread(fp_im)
+ im = cv.imread(image_record.filepath)
# display
cv.imshow('', im)
# cv gui
diff --git a/megapixels/commands/datasets/sha256.py b/megapixels/commands/datasets/sha256.py
deleted file mode 100644
index 4c734073..00000000
--- a/megapixels/commands/datasets/sha256.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-from app.utils.logger_utils import Logger
-
-log = Logger.getLogger()
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input directory')
-@click.option('-m', '--media', 'opt_dir_media', required=True,
- help='Input media directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output directory')
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
- help='Slice list of files')
-@click.option('-t', '--threads', 'opt_threads', default=4,
- help='Number of threads')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_slice, opt_threads, opt_force):
- """Multithreading test"""
-
- from glob import glob
- from os.path import join
- from pathlib import Path
- import time
- from multiprocessing.dummy import Pool as ThreadPool
- import random
-
- import pandas as pd
- from tqdm import tqdm
- from glob import glob
-
- from app.utils import file_utils, im_utils
-
-
- if not opt_force and Path(opt_fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
-
- df_files = pd.read_csv(opt_fp_in).set_index('index')
-
- if opt_slice:
- df_files = df_files[opt_slice[0]:opt_slice[1]]
-
- log.info('Processing {:,} images'.format(len(df_files)))
-
-
- # prepare list of images to multithread into sha256s
- file_objs = []
- for ds_file in df_files.itertuples():
- fp_im = join(opt_dir_media, str(ds_file.subdir), f"{ds_file.fn}.{ds_file.ext}")
- file_objs.append({'fp': fp_im, 'index': ds_file.Index})
-
- # convert to thread pool
- pbar = tqdm(total=len(file_objs))
-
- def as_sha256(file_obj):
- pbar.update(1)
- file_obj['sha256'] = file_utils.sha256(file_obj['fp'])
- return file_obj
-
- # multithread pool
- pool_file_objs = []
- st = time.time()
- pool = ThreadPool(opt_threads)
- with tqdm(total=len(file_objs)) as pbar:
- pool_file_objs = pool.map(as_sha256, file_objs)
- pbar.close()
-
- # convert data to dict
- data = []
- for pool_file_obj in pool_file_objs:
- data.append( {
- 'sha256': pool_file_obj['sha256'],
- 'index': pool_file_obj['index']
- })
-
- # save to CSV
- file_utils.mkdirs(opt_fp_out)
- df = pd.DataFrame.from_dict(data)
- df.to_csv(opt_fp_out, index=False)
-
- # timing
- log.info('time: {:.2f}, theads: {}'.format(time.time() - st, opt_threads)) \ No newline at end of file
diff --git a/megapixels/commands/demo/face_analysis.py b/megapixels/commands/demo/face_analysis.py
new file mode 100644
index 00000000..6721a02d
--- /dev/null
+++ b/megapixels/commands/demo/face_analysis.py
@@ -0,0 +1,56 @@
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+@click.command()
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--index', 'opt_index', type=int, required=True,
+ help='ROI index to look up')
+@click.pass_context
+def cli(ctx, opt_index, opt_data_store, opt_dataset):
+ """Display image info"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils
+
+ log = Logger.getLogger()
+
+ dataset = Dataset(opt_data_store, opt_dataset)
+ # find image records
+ image_record = dataset.roi_idx_to_record(opt_index)
+ # debug
+ image_record.summarize()
+ # load image
+ fp_im = image_record.filepath
+ im = cv.imread(fp_im)
+ # display
+ cv.imshow('', im)
+ # cv gui
+ while True:
+ k = cv.waitKey(1) & 0xFF
+ if k == 27 or k == ord('q'): # ESC
+ cv.destroyAllWindows()
+ sys.exit()
+ elif k != 255:
+ # any key to continue
+ break \ No newline at end of file
diff --git a/megapixels/commands/demo/face_search.py b/megapixels/commands/demo/face_search.py
new file mode 100644
index 00000000..08b2323d
--- /dev/null
+++ b/megapixels/commands/demo/face_search.py
@@ -0,0 +1,95 @@
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input face image')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--gpu', 'opt_gpu', default=0,
+ help='GPU index (use -1 for CPU)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_gpu):
+ """Display image info"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import imutils
+ import pandas as pd
+ import cv2 as cv
+ import dlib
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore, DataStoreS3
+ from app.processors import face_detector
+ from app.processors import face_recognition
+
+ log = Logger.getLogger()
+
+ # init face detection
+ detector = face_detector.DetectorDLIBHOG()
+ # init face recognition/vector
+ recognition = face_recognition.RecognitionDLIB(gpu=opt_gpu)
+
+ # load query image
+ im_query = cv.imread(opt_fp_in)
+ # get detections as BBox objects; bail out before indexing if none found
+ bboxes = detector.detect(im_query, largest=True)
+ if not bboxes:
+ log.error('No face detected. Exiting')
+ return
+ bbox = bboxes[0]
+ dim = im_query.shape[:2][::-1]
+ bbox = bbox.to_dim(dim) # convert back to real dimensions
+
+ # extract the face vectors
+ vec_query = recognition.vec(im_query, bbox)
+
+ # load dataset CSVs
+ dataset = Dataset(opt_data_store, opt_dataset)
+
+ # find matches
+ image_records = dataset.find_matches(vec_query, n_results=5)
+
+ # summary
+ ims_match = [im_query]
+ for image_record in image_records:
+ image_record.summarize()
+ log.info(f'{image_record.filepath}')
+ im_match = cv.imread(image_record.filepath)
+ ims_match.append(im_match)
+
+ montages = imutils.build_montages(ims_match, (256, 256), (3,2))
+
+ for i, montage in enumerate(montages):
+ cv.imshow(f'{i}', montage)
+ # cv gui
+ while True:
+ k = cv.waitKey(1) & 0xFF
+ if k == 27 or k == ord('q'): # ESC
+ cv.destroyAllWindows()
+ sys.exit()
+ elif k != 255:
+ # any key to continue
+ break \ No newline at end of file
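
Note: find_matches() accepts a threshold (default 0.6, the conventional dlib face-distance cutoff) that this commit does not yet apply; filtering the ranked matches would look roughly like this sketch:

    def filter_by_threshold(match_idxs, sim_scores, threshold=0.6):
        # keep only matches whose L2 distance to the query is under the cutoff
        return [i for i in match_idxs if sim_scores[i] < threshold]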