path: root/megapixels/commands
Diffstat (limited to 'megapixels/commands')
-rw-r--r--  megapixels/commands/cv/cluster.py | 22
-rw-r--r--  megapixels/commands/cv/face_pose.py (renamed from megapixels/commands/cv/rois_to_pose.py) | 75
-rw-r--r--  megapixels/commands/cv/face_roi.py (renamed from megapixels/commands/cv/files_to_rois.py) | 61
-rw-r--r--  megapixels/commands/cv/face_vector.py (renamed from megapixels/commands/cv/rois_to_vecs.py) | 58
-rw-r--r--  megapixels/commands/datasets/add_uuid.py | 44
-rw-r--r--  megapixels/commands/datasets/filter_by_pose.py | 96
-rw-r--r--  megapixels/commands/datasets/filter_poses.py | 76
-rw-r--r--  megapixels/commands/datasets/gen_filepath.py (renamed from megapixels/commands/datasets/file_meta.py) | 50
-rw-r--r--  megapixels/commands/datasets/gen_uuid.py | 65
-rw-r--r--  megapixels/commands/datasets/identity_meta_lfw.py | 93
-rw-r--r--  megapixels/commands/datasets/identity_meta_vgg_face2.py | 88
-rw-r--r--  megapixels/commands/datasets/lookup.py | 33
-rw-r--r--  megapixels/commands/datasets/records.py | 159
-rw-r--r--  megapixels/commands/datasets/s3.py | 47
-rw-r--r--  megapixels/commands/datasets/s3_sync.py | 57
-rw-r--r--  megapixels/commands/datasets/sha256.py | 89
-rw-r--r--  megapixels/commands/datasets/symlink.py | 45
-rw-r--r--  megapixels/commands/datasets/symlink_uuid.py | 57
-rw-r--r--  megapixels/commands/demo/face_analysis.py | 56
-rw-r--r--  megapixels/commands/demo/face_search.py | 94
20 files changed, 944 insertions, 421 deletions
diff --git a/megapixels/commands/cv/cluster.py b/megapixels/commands/cv/cluster.py
index 94334133..419091a0 100644
--- a/megapixels/commands/cv/cluster.py
+++ b/megapixels/commands/cv/cluster.py
@@ -23,20 +23,20 @@ from app.utils.logger_utils import Logger
@click.pass_context
def cli(ctx, opt_data_store, opt_dataset, opt_metadata):
"""Display image info"""
-
- # cluster the embeddings
-print("[INFO] clustering...")
-clt = DBSCAN(metric="euclidean", n_jobs=args["jobs"])
-clt.fit(encodings)
-
-# determine the total number of unique faces found in the dataset
-labelIDs = np.unique(clt.labels_)
-numUniqueFaces = len(np.where(labelIDs > -1)[0])
-print("[INFO] # unique faces: {}".format(numUniqueFaces))
+
+ # cluster the embeddings
+ print("[INFO] clustering...")
+ clt = DBSCAN(metric="euclidean", n_jobs=args["jobs"])
+ clt.fit(encodings)
+
+ # determine the total number of unique faces found in the dataset
+ labelIDs = np.unique(clt.labels_)
+ numUniqueFaces = len(np.where(labelIDs > -1)[0])
+ print("[INFO] # unique faces: {}".format(numUniqueFaces))
# load and display image
im = cv.imread(fp_im)
cv.imshow('', im)
-
+
while True:
k = cv.waitKey(1) & 0xFF
if k == 27 or k == ord('q'): # ESC
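Note: the re-indented block above still references args and encodings, neither of which is defined in this command. For reference, a minimal self-contained sketch of the same DBSCAN face-clustering step (the encodings.npy input path is a placeholder):

  import numpy as np
  from sklearn.cluster import DBSCAN

  # encodings: (N, 128) array of face embeddings, e.g. from dlib
  encodings = np.load('encodings.npy')  # placeholder input

  clt = DBSCAN(metric='euclidean', n_jobs=-1)
  clt.fit(encodings)

  # DBSCAN labels noise as -1; every other label is one face cluster
  label_ids = np.unique(clt.labels_)
  num_unique_faces = len(np.where(label_ids > -1)[0])
  print('[INFO] # unique faces: {}'.format(num_unique_faces))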
diff --git a/megapixels/commands/cv/rois_to_pose.py b/megapixels/commands/cv/face_pose.py
index 3877cecf..e7ffb7ac 100644
--- a/megapixels/commands/cv/rois_to_pose.py
+++ b/megapixels/commands/cv/face_pose.py
@@ -9,14 +9,22 @@ from app.utils import click_utils
from app.settings import app_cfg as cfg
@click.command()
-@click.option('-i', '--input', 'opt_fp_files', required=True,
- help='Input ROI CSV')
-@click.option('-r', '--rois', 'opt_fp_rois', required=True,
- help='Input ROI CSV')
-@click.option('-m', '--media', 'opt_dir_media', required=True,
- help='Input media directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output CSV')
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
@click.option('--size', 'opt_size',
type=(int, int), default=(300, 300),
help='Output image size')
@@ -27,7 +35,7 @@ from app.settings import app_cfg as cfg
@click.option('-d', '--display', 'opt_display', is_flag=True,
help='Display image for debugging')
@click.pass_context
-def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
opt_slice, opt_force, opt_display):
"""Converts ROIs to pose: roll, yaw, pitch"""
@@ -47,42 +55,47 @@ def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
from app.utils import logger_utils, file_utils, im_utils
from app.processors.face_landmarks import LandmarksDLIB
from app.processors.face_pose import FacePoseDLIB
+ from app.models.data_store import DataStore
# -------------------------------------------------
# init here
log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FACE_POSE) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwrite')
+ return
# init face processors
face_pose = FacePoseDLIB()
face_landmarks = LandmarksDLIB()
- # load datra
- df_files = pd.read_csv(opt_fp_files)
- df_rois = pd.read_csv(opt_fp_rois)
-
- if not opt_force and Path(opt_fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
-
+ # load filepath data
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+ # load ROI data
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # slice if you want
if opt_slice:
- df_rois = df_rois[opt_slice[0]:opt_slice[1]]
-
- # -------------------------------------------------
- # process here
- df_img_groups = df_rois.groupby('image_index')
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
+ # group by image index (speedup if multiple faces per image)
+ df_img_groups = df_roi.groupby('record_index')
log.debug('processing {:,} groups'.format(len(df_img_groups)))
-
+ # store poses and convert to DataFrame
poses = []
# iterate
- #for df_roi_group_idx, df_roi_group in tqdm(df_roi_groups):
- for image_index, df_img_group in tqdm(df_img_groups):
+ for record_index, df_img_group in tqdm(df_img_groups):
# make fp
- #image_index = df_roi_group.image_index.values[0]
- pds_file = df_files.iloc[image_index]
- fp_im = join(opt_dir_media, pds_file.subdir, '{}.{}'.format(pds_file.fn, pds_file.ext))
+ ds_record = df_record.iloc[record_index]
+ fp_im = data_store.face_image(ds_record.subdir, ds_record.fn, ds_record.ext)
im = cv.imread(fp_im)
# get bbox
x = df_img_group.x.values[0]
@@ -116,12 +129,12 @@ def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
break
# add image index and append to result CSV data
- pose_degrees['image_index'] = image_index
+ pose_degrees['record_index'] = record_index
poses.append(pose_degrees)
# save data
- file_utils.mkdirs(opt_fp_out)
+ file_utils.mkdirs(fp_out)
df = pd.DataFrame.from_dict(poses)
df.index.name = 'index'
- df.to_csv(opt_fp_out) \ No newline at end of file
+ df.to_csv(fp_out) \ No newline at end of file
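The cv commands above now resolve paths through a shared DataStore model instead of explicit -i/-r/-m/-o flags. DataStore is internal to this repo; a minimal sketch of the pattern implied by the calls in this diff (metadata(), face_image()), with a hypothetical directory layout:

  from os.path import join

  class DataStore:
    """Sketch: maps (data_store, dataset) enums to filesystem paths."""
    def __init__(self, data_store, dataset, root='/data_store'):
      # hypothetical layout: <root>/<store>/<dataset>/...
      self.base = join(root, data_store.name.lower(), dataset.name.lower())

    def metadata(self, metadata_type):
      # e.g. .../metadata/face_pose.csv
      return join(self.base, 'metadata', '{}.csv'.format(metadata_type.name.lower()))

    def face_image(self, subdir, fn, ext):
      # e.g. .../media/original/<subdir>/<fn>.<ext>
      return join(self.base, 'media', 'original', subdir, '{}.{}'.format(fn, ext))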
diff --git a/megapixels/commands/cv/files_to_rois.py b/megapixels/commands/cv/face_roi.py
index 1aaf991c..d7248aee 100644
--- a/megapixels/commands/cv/files_to_rois.py
+++ b/megapixels/commands/cv/face_roi.py
@@ -12,12 +12,22 @@ from app.settings import app_cfg as cfg
color_filters = {'color': 1, 'gray': 2, 'all': 3}
@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input CSV (eg image_files.csv)')
-@click.option('-m', '--media', 'opt_dir_media', required=True,
- help='Input media directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output CSV')
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
@click.option('--size', 'opt_size',
type=(int, int), default=(300, 300),
help='Output image size')
@@ -40,10 +50,10 @@ color_filters = {'color': 1, 'gray': 2, 'all': 3}
@click.option('--color', 'opt_color_filter',
type=click.Choice(color_filters.keys()), default='all',
help='Filter to keep color or grayscale images (color = keep color)')
-@click.option('--largest', 'opt_largest', is_flag=True,
+@click.option('--largest/--all-faces', 'opt_largest', is_flag=True, default=True,
help='Only keep largest face')
@click.pass_context
-def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_size, opt_detector_type,
+def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset, opt_size, opt_detector_type,
opt_gpu, opt_conf_thresh, opt_pyramids, opt_slice, opt_display, opt_force, opt_color_filter,
opt_largest):
"""Converts frames with faces to CSV of ROIs"""
@@ -61,17 +71,24 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_size, opt_detector_type,
import pandas as pd
from app.utils import logger_utils, file_utils, im_utils
- from app.processors import face_detector
+ from app.processors import face_detector
+ from app.models.data_store import DataStore
# -------------------------------------------------
# init here
log = logger_utils.Logger.getLogger()
- if not opt_force and Path(opt_fp_out).exists():
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FACE_ROI) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
log.error('File exists. Use "-f / --force" to overwrite')
return
+ # set detector
if opt_detector_type == types.FaceDetectNet.CVDNN:
detector = face_detector.DetectorCVDNN()
elif opt_detector_type == types.FaceDetectNet.DLIB_CNN:
@@ -85,22 +102,20 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_size, opt_detector_type,
return
- # -------------------------------------------------
- # process here
- color_filter = color_filters[opt_color_filter]
-
# get list of files to process
- df_files = pd.read_csv(opt_fp_in).set_index('index')
-
+ fp_in = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
+ df_records = pd.read_csv(fp_in).set_index('index')
if opt_slice:
- df_files = df_files[opt_slice[0]:opt_slice[1]]
- log.debug('processing {:,} files'.format(len(df_files)))
+ df_records = df_records[opt_slice[0]:opt_slice[1]]
+ log.debug('processing {:,} files'.format(len(df_records)))
+ # filter out grayscale
+ color_filter = color_filters[opt_color_filter]
data = []
- for df_file in tqdm(df_files.itertuples(), total=len(df_files)):
- fp_im = join(opt_dir_media, str(df_file.subdir), f'{df_file.fn}.{df_file.ext}')
+ for df_record in tqdm(df_records.itertuples(), total=len(df_records)):
+ fp_im = data_store.face_image(str(df_record.subdir), str(df_record.fn), str(df_record.ext))
im = cv.imread(fp_im)
# filter out color or grayscale images
@@ -123,7 +138,7 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_size, opt_detector_type,
for bbox in bboxes:
roi = {
- 'image_index': int(df_file.Index),
+ 'record_index': int(df_record.Index),
'x': bbox.x,
'y': bbox.y,
'w': bbox.w,
@@ -150,7 +165,7 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_size, opt_detector_type,
break
# save data
- file_utils.mkdirs(opt_fp_out)
+ file_utils.mkdirs(fp_out)
df = pd.DataFrame.from_dict(data)
df.index.name = 'index'
- df.to_csv(opt_fp_out) \ No newline at end of file
+ df.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/cv/rois_to_vecs.py b/megapixels/commands/cv/face_vector.py
index 525f4404..203f73eb 100644
--- a/megapixels/commands/cv/rois_to_vecs.py
+++ b/megapixels/commands/cv/face_vector.py
@@ -9,14 +9,20 @@ from app.utils import click_utils
from app.settings import app_cfg as cfg
@click.command()
-@click.option('-i', '--input', 'opt_fp_files', required=True,
- help='Input file meta CSV')
-@click.option('-r', '--rois', 'opt_fp_rois', required=True,
- help='Input ROI CSV')
-@click.option('-m', '--media', 'opt_dir_media', required=True,
- help='Input media directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
@click.option('--size', 'opt_size',
type=(int, int), default=(300, 300),
help='Output image size')
@@ -31,7 +37,7 @@ from app.settings import app_cfg as cfg
@click.option('-g', '--gpu', 'opt_gpu', default=0,
help='GPU index')
@click.pass_context
-def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
+def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
opt_slice, opt_force, opt_gpu, opt_jitters, opt_padding):
"""Converts face ROIs to vectors"""
@@ -48,6 +54,7 @@ def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
import pandas as pd
from app.models.bbox import BBox
+ from app.models.data_store import DataStore
from app.utils import logger_utils, file_utils, im_utils
from app.processors import face_recognition
@@ -56,24 +63,30 @@ def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
# init here
log = logger_utils.Logger.getLogger()
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FACE_VECTOR) if opt_fp_out is None else opt_fp_out
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwrite')
+ return
# init face processors
facerec = face_recognition.RecognitionDLIB()
# load data
- df_file_meta = pd.read_csv(opt_fp_files)
- df_rois = pd.read_csv(opt_fp_rois)
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
- if not opt_force and Path(opt_fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
-
if opt_slice:
- df_rois = df_rois[opt_slice[0]:opt_slice[1]]
+ df_roi = df_roi[opt_slice[0]:opt_slice[1]]
# -------------------------------------------------
# process here
- df_img_groups = df_rois.groupby('image_index')
+ df_img_groups = df_roi.groupby('record_index')
log.debug('processing {:,} groups'.format(len(df_img_groups)))
vecs = []
@@ -81,8 +94,11 @@ def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
for image_index, df_img_group in tqdm(df_img_groups):
# make fp
roi_index = df_img_group.index.values[0]
- file_meta = df_file_meta.iloc[image_index] # locate image meta
- fp_im = join(opt_dir_media, file_meta.subdir, '{}.{}'.format(file_meta.fn, file_meta.ext))
+ # log.debug(f'roi_index: {roi_index}, image_index: {image_index}')
+ ds_file = df_record.loc[roi_index] # locate image meta
+ #ds_file = df_record.loc['index', image_index] # locate image meta
+
+ fp_im = data_store.face_image(str(ds_file.subdir), str(ds_file.fn), str(ds_file.ext))
im = cv.imread(fp_im)
# get bbox
x = df_img_group.x.values[0]
@@ -103,7 +119,7 @@ def cli(ctx, opt_fp_files, opt_fp_rois, opt_dir_media, opt_fp_out, opt_size,
# save data
- file_utils.mkdirs(opt_fp_out)
df = pd.DataFrame.from_dict(vecs)
df.index.name = 'index'
- df.to_csv(opt_fp_out) \ No newline at end of file
+ file_utils.mkdirs(fp_out)
+ df.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/datasets/add_uuid.py b/megapixels/commands/datasets/add_uuid.py
deleted file mode 100644
index 9c14c0e3..00000000
--- a/megapixels/commands/datasets/add_uuid.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-from app.utils.logger_utils import Logger
-
-log = Logger.getLogger()
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input directory')
-@click.option('-o', '--output', 'opt_fp_out',
- help='Output directory')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_force):
- """Appends UUID to records CSV"""
-
- from glob import glob
- from os.path import join
- from pathlib import Path
- import base64
- import uuid
-
- from tqdm import tqdm
- import pandas as pd
-
- if not opt_force and Path(opt_fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
-
- # load names
- df_records = pd.read_csv(opt_fp_in)
- records = df_records.to_dict('index')
- # append a UUID to every entry
- for idx, item in records.items():
- records[idx]['uuid'] = uuid.uuid4()
- # save to csv
- df_uuid = pd.DataFrame.from_dict(list(records.values())) # ignore the indices
- df_uuid.to_csv(opt_fp_out, index=False)
-
- log.info('done') \ No newline at end of file
diff --git a/megapixels/commands/datasets/filter_by_pose.py b/megapixels/commands/datasets/filter_by_pose.py
new file mode 100644
index 00000000..a588b18e
--- /dev/null
+++ b/megapixels/commands/datasets/filter_by_pose.py
@@ -0,0 +1,96 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--yaw', 'opt_yaw', type=(float, float), default=cfg.POSE_MINMAX_YAW,
+ help='Yaw (min, max)')
+@click.option('--roll', 'opt_roll', type=(float, float), default=cfg.POSE_MINMAX_ROLL,
+ help='Roll (min, max)')
+@click.option('--pitch', 'opt_pitch', type=(float, float), default=cfg.POSE_MINMAX_PITCH,
+ help='Pitch (min, max)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_yaw, opt_roll, opt_pitch):
+ """Filter out exaggerated poses"""
+
+ import sys
+ from os.path import join
+ from pathlib import Path
+ import shutil
+ from datetime import datetime
+
+ import pandas as pd
+ from tqdm import tqdm
+
+ from app.models.data_store import DataStore
+ from app.utils import file_utils
+
+ # create date store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # load pose
+ fp_pose = data_store.metadata(types.Metadata.FACE_POSE)
+ df_pose = pd.read_csv(fp_pose).set_index('index')
+ # load roi
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
+ # load filepath
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+ # debug
+ log.info('Processing {:,} rows'.format(len(df_pose)))
+ n_rows = len(df_record)
+
+ # filter out extreme poses
+ invalid_indices = []
+ for ds_pose in tqdm(df_pose.itertuples(), total=len(df_pose)):
+ if (ds_pose.yaw < opt_yaw[0] or ds_pose.yaw > opt_yaw[1]) \
+ or (ds_pose.roll < opt_roll[0] or ds_pose.roll > opt_roll[1]) \
+ or (ds_pose.pitch < opt_pitch[0] or ds_pose.pitch > opt_pitch[1]):
+ invalid_indices.append(ds_pose.Index) # unique file indexes
+
+ # filter out valid/invalid
+ log.info(f'indices 0-20: {invalid_indices[:20]}')
+ log.info(f'Removing {len(invalid_indices)} invalid indices...')
+ df_record = df_record.drop(df_record.index[invalid_indices])
+ df_roi = df_roi.drop(df_roi.index[invalid_indices])
+ df_pose = df_pose.drop(df_pose.index[invalid_indices])
+ log.info(f'Removed {n_rows - len(df_record)}')
+
+ # move file to make backup
+ dir_bkup = join(Path(fp_pose).parent, f'backup_{datetime.now():%Y%m%d_%H%M%S}')
+ file_utils.mkdirs(dir_bkup)
+ # move files to backup
+ shutil.move(fp_record, join(dir_bkup, Path(fp_record).name))
+ shutil.move(fp_roi, join(dir_bkup, Path(fp_roi).name))
+ shutil.move(fp_pose, join(dir_bkup, Path(fp_pose).name))
+ # resave file records
+ df_record = df_record.reset_index(drop=True)
+ df_record.index.name = 'index'
+ df_record.to_csv(fp_record)
+ # resave ROI
+ df_roi = df_roi.reset_index(drop=True)
+ df_roi.index.name = 'index'
+ df_roi.to_csv(fp_roi)
+ # resave pose
+ df_pose = df_pose.reset_index(drop=True)
+ df_pose.index.name = 'index'
+ df_pose.to_csv(fp_pose)
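The row-by-row range test above can also be written as a vectorized mask, which avoids the and/or precedence pitfall entirely. A sketch assuming the same df_pose columns and (min, max) option tuples:

  import pandas as pd

  def out_of_range(s: pd.Series, lo: float, hi: float) -> pd.Series:
    return (s < lo) | (s > hi)

  # a row is invalid if any of yaw/roll/pitch leaves its (min, max) window
  mask = (out_of_range(df_pose.yaw, *opt_yaw)
          | out_of_range(df_pose.roll, *opt_roll)
          | out_of_range(df_pose.pitch, *opt_pitch))
  invalid_indices = df_pose.index[mask].tolist()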
diff --git a/megapixels/commands/datasets/filter_poses.py b/megapixels/commands/datasets/filter_poses.py
deleted file mode 100644
index 304eeff2..00000000
--- a/megapixels/commands/datasets/filter_poses.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-from app.utils.logger_utils import Logger
-
-log = Logger.getLogger()
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output directory')
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
- help='Slice list of files')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.option('--yaw', 'opt_yaw', type=(float, float), default=(-25,25),
- help='Yaw (min, max)')
-@click.option('--roll', 'opt_roll', type=(float, float), default=(-15,15),
- help='Roll (min, max)')
-@click.option('--pitch', 'opt_pitch', type=(float, float), default=(-10,10),
- help='Pitch (min, max)')
-@click.option('--drop', 'opt_drop', type=click.Choice(['valid', 'invalid']), default='invalid',
- help='Drop valid or invalid poses')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_yaw, opt_roll, opt_pitch,
- opt_drop, opt_force):
- """Filter out exaggerated poses"""
-
- from glob import glob
- from os.path import join
- from pathlib import Path
- import time
- from multiprocessing.dummy import Pool as ThreadPool
- import random
-
- import pandas as pd
- from tqdm import tqdm
- from glob import glob
-
- from app.utils import file_utils, im_utils
-
-
- if not opt_force and Path(opt_fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
-
- df_poses = pd.read_csv(opt_fp_in).set_index('index')
-
- if opt_slice:
- df_poses = df_poses[opt_slice[0]:opt_slice[1]]
-
- log.info('Processing {:,} rows'.format(len(df_poses)))
-
- # extend a new temporary column
- df_poses['valid'] = [0] * len(df_poses)
-
- # filter out extreme poses
- for ds_pose in tqdm(df_poses.itertuples(), total=len(df_poses)):
- if ds_pose.yaw > opt_yaw[0] and ds_pose.yaw < opt_yaw[1] \
- and ds_pose.roll > opt_roll[0] and ds_pose.roll < opt_roll[1] \
- and ds_pose.pitch > opt_pitch[0] and ds_pose.pitch < opt_pitch[1]:
- df_poses.at[ds_pose.Index, 'valid'] = 1
-
- # filter out valid/invalid
- drop_val = 0 if opt_drop == 'valid' else 0 # drop 0's if drop == valid, else drop 1's
- df_poses_filtered = df_poses.drop(df_poses[df_poses.valid == int()].index, axis=0)
-
- # drop temp column
- df_poses_filtered = df_poses_filtered.drop('valid', axis=1)
-
- # save filtered poses
- df_poses_filtered.to_csv(opt_fp_out)
- log.info('Saved {:,} rows'.format(len(df_poses_filtered))) \ No newline at end of file
diff --git a/megapixels/commands/datasets/file_meta.py b/megapixels/commands/datasets/gen_filepath.py
index e1456f44..5db405c0 100644
--- a/megapixels/commands/datasets/file_meta.py
+++ b/megapixels/commands/datasets/gen_filepath.py
@@ -12,10 +12,20 @@ from app.utils.logger_utils import Logger
log = Logger.getLogger()
@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output file for file meta CSV')
+@click.option('-i', '--input', 'opt_fp_in',
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out',
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
help='Slice list of files')
@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
@@ -25,7 +35,8 @@ log = Logger.getLogger()
@click.option('-f', '--force', 'opt_force', is_flag=True,
help='Force overwrite file')
@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_recursive, opt_threads, opt_force):
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_slice,
+ opt_recursive, opt_threads, opt_force):
"""Multithreading test"""
from glob import glob
@@ -39,21 +50,26 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_recursive, opt_threads, opt_f
from tqdm import tqdm
from glob import glob
+ from app.models.data_store import DataStore
from app.utils import file_utils, im_utils
-
- if not opt_force and Path(opt_fp_out).exists():
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_out = opt_fp_out if opt_fp_out is not None else data_store.metadata(types.Metadata.FILEPATH)
+ if not opt_force and Path(fp_out).exists():
log.error('File exists. Use "-f / --force" to overwrite')
return
+
+ # glob files
+ fp_in = opt_fp_in if opt_fp_in is not None else data_store.media_images_original()
fp_ims = []
- log.info(f'Globbing {opt_fp_in}')
+ log.info(f'Globbing {fp_in}')
for ext in ['jpg', 'png']:
if opt_recursive:
- fp_glob = join(opt_fp_in, '**/*.{}'.format(ext))
+ fp_glob = join(fp_in, '**/*.{}'.format(ext))
fp_ims += glob(fp_glob, recursive=True)
else:
- fp_glob = join(opt_fp_in, '*.{}'.format(ext))
+ fp_glob = join(fp_in, '*.{}'.format(ext))
fp_ims += glob(fp_glob)
if not fp_ims:
@@ -63,14 +79,14 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_recursive, opt_threads, opt_f
if opt_slice:
fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
- log.info('Processing {:,} images'.format(len(fp_ims)))
+ log.info('Found {:,} images'.format(len(fp_ims)))
# convert data to dict
data = []
for i, fp_im in enumerate(tqdm(fp_ims)):
fpp_im = Path(fp_im)
- subdir = str(fpp_im.parent.relative_to(opt_fp_in))
+ subdir = str(fpp_im.parent.relative_to(fp_in))
data.append( {
'subdir': subdir,
'fn': fpp_im.stem,
@@ -78,7 +94,9 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_slice, opt_recursive, opt_threads, opt_f
})
# save to CSV
- file_utils.mkdirs(opt_fp_out)
- df = pd.DataFrame.from_dict(data)
- df.index.name = 'index'
- df.to_csv(opt_fp_out) \ No newline at end of file
+ file_utils.mkdirs(fp_out)
+ df_filepath = pd.DataFrame.from_dict(data)
+ df_filepath = df_filepath.sort_values(by=['subdir'], ascending=True)
+ df_filepath = df_filepath.reset_index()
+ df_filepath.index.name = 'index'
+ df_filepath.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/datasets/gen_uuid.py b/megapixels/commands/datasets/gen_uuid.py
new file mode 100644
index 00000000..d7e7b52c
--- /dev/null
+++ b/megapixels/commands/datasets/gen_uuid.py
@@ -0,0 +1,65 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_force):
+ """Appends UUID to records CSV"""
+
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import base64
+ import uuid
+
+ from tqdm import tqdm
+ import pandas as pd
+
+ from app.models.data_store import DataStore
+
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.UUID) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # load sha256 records
+ fp_in = data_store.metadata(types.Metadata.SHA256) if opt_fp_in is None else opt_fp_in
+ log.info(f'Loading: {fp_in}')
+ df_records = pd.read_csv(fp_in).set_index('index')
+
+ df_uuids = df_records.copy()
+ df_uuids['uuid'] = [uuid.uuid4()] * len(df_uuids)
+
+ for df_record in tqdm(df_records.itertuples(), total=len(df_uuids)):
+ image_index = df_record.Index
+ df_uuids.at[image_index, 'uuid'] = uuid.uuid4()
+
+ df_uuids = df_uuids.drop(['sha256', 'identity_index'], axis=1)
+ df_uuids.to_csv(fp_out) \ No newline at end of file
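The placeholder column plus per-row loop above can be collapsed into one assignment; a sketch:

  import uuid

  # one fresh UUID per row; note that [uuid.uuid4()] * len(df_uuids)
  # alone would repeat the same UUID on every row
  df_uuids['uuid'] = [uuid.uuid4() for _ in range(len(df_uuids))]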
diff --git a/megapixels/commands/datasets/identity_meta_lfw.py b/megapixels/commands/datasets/identity_meta_lfw.py
new file mode 100644
index 00000000..45386b23
--- /dev/null
+++ b/megapixels/commands/datasets/identity_meta_lfw.py
@@ -0,0 +1,93 @@
+'''
+add identity from description using subdir
+'''
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Identity meta file')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--column', 'opt_identity_key', default='identity_key',
+ help='Match column')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_identity_key, opt_data_store, opt_force):
+ """Display image info"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ log = Logger.getLogger()
+
+ # output file
+ opt_dataset = types.Dataset.LFW
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_out = data_store.metadata(types.Metadata.IDENTITY) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ log.debug(fp_out)
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # init dataset
+ # load file records
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+
+ # load identity meta
+ # this file may be prepared in a Jupyter notebook;
+ # rows are matched on the "identity_key" column
+ df_identity_meta = pd.read_csv(opt_fp_in).set_index('index')
+ # create a new file called 'identity.csv'
+ identities = []
+ # iterate records and get identity index where 'identity_key' matches
+ log.debug(type(df_record))
+ identity_indices = []
+ for record_idx, ds_record in tqdm(df_record.iterrows(), total=len(df_record)):
+ identity_value = ds_record[opt_identity_key]
+ identity_index = ds_record.identity_index
+ ds_identity_meta = df_identity_meta.loc[(df_identity_meta[opt_identity_key] == identity_value)]
+ if identity_index not in identity_indices:
+ identity_indices.append(identity_index)
+ identities.append({
+ 'description': ds_identity_meta.description.values[0],
+ 'name': ds_identity_meta.name.values[0],
+ 'images': ds_identity_meta.images.values[0],
+ 'gender': ds_identity_meta.gender.values[0],
+ })
+
+ # write to csv
+ df_identity = pd.DataFrame.from_dict(identities)
+ df_identity.index.name = 'index'
+ df_identity.to_csv(fp_out)
+ '''
+ index,name,name_orig,description,gender,images,image_index,identity_key
+ 0,A. J. Cook,AJ Cook,Canadian actress,f,1,0,AJ_Cook
+ '''
+
+
diff --git a/megapixels/commands/datasets/identity_meta_vgg_face2.py b/megapixels/commands/datasets/identity_meta_vgg_face2.py
new file mode 100644
index 00000000..85b6644d
--- /dev/null
+++ b/megapixels/commands/datasets/identity_meta_vgg_face2.py
@@ -0,0 +1,88 @@
+'''
+add identity from description using subdir
+'''
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Identity meta file')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_force):
+ """Display image info"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ log = Logger.getLogger()
+
+ # output file
+ opt_dataset = types.Dataset.VGG_FACE2
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_out = data_store.metadata(types.Metadata.IDENTITY) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ log.debug(fp_out)
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # init dataset
+ # load file records
+ identity_key = 'identity_key'
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+
+ # load identity meta
+ # this file may be prepared in a Jupyter notebook;
+ # rows are matched on the "identity_key" column
+ df_identity_meta = pd.read_csv(opt_fp_in).set_index('index')
+ # create a new file called 'identity.csv'
+ identities = []
+ # iterate records and get identity index where 'identity_key' matches
+ log.debug(type(df_record))
+ identity_indices = []
+ for ds_record in tqdm(df_record.itertuples(), total=len(df_record)):
+ identity_value = ds_record.identity_key
+ identity_index = ds_record.identity_index
+ ds_identity_meta = df_identity_meta.loc[(df_identity_meta[identity_key] == identity_value)]
+ if identity_index not in identity_indices:
+ identity_indices.append(identity_index)
+ identities.append({
+ 'description': ds_identity_meta.description.values[0],
+ 'name': ds_identity_meta.name.values[0],
+ 'images': ds_identity_meta.images.values[0],
+ 'gender': ds_identity_meta.gender.values[0],
+ })
+
+ # write to csv
+ df_identity = pd.DataFrame.from_dict(identities)
+ df_identity.index.name = 'index'
+ df_identity.to_csv(fp_out)
+
+
diff --git a/megapixels/commands/datasets/lookup.py b/megapixels/commands/datasets/lookup.py
index e84bdf3e..c1c66c19 100644
--- a/megapixels/commands/datasets/lookup.py
+++ b/megapixels/commands/datasets/lookup.py
@@ -6,12 +6,14 @@ from app.utils import click_utils
from app.settings import app_cfg as cfg
from app.utils.logger_utils import Logger
+log = Logger.getLogger()
+
@click.command()
-@click.option('--index', 'opt_index', type=int,
+@click.option('--index', 'opt_index', type=int, required=True,
help='Vector index to lookup')
@click.option('--data_store', 'opt_data_store',
type=cfg.DataStoreVar,
- default=click_utils.get_default(types.DataStore.NAS),
+ default=click_utils.get_default(types.DataStore.SSD),
show_default=True,
help=click_utils.show_help(types.DataStore))
@click.option('--dataset', 'opt_dataset',
@@ -19,12 +21,8 @@ from app.utils.logger_utils import Logger
required=True,
show_default=True,
help=click_utils.show_help(types.Dataset))
-@click.option('--metadata', 'opt_metadata_type', required=True,
- type=cfg.MetadataVar,
- show_default=True,
- help=click_utils.show_help(types.Metadata))
@click.pass_context
-def cli(ctx, opt_index, opt_data_store, opt_dataset, opt_metadata_type):
+def cli(ctx, opt_index, opt_data_store, opt_dataset):
"""Display image info"""
import sys
@@ -37,22 +35,21 @@ def cli(ctx, opt_index, opt_data_store, opt_dataset, opt_metadata_type):
import cv2 as cv
from tqdm import tqdm
- from app.utils import file_utils, im_utils, path_utils
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
log = Logger.getLogger()
-
- log.info(f'creating dataset: {opt_dataset}')
- dataset = Dataset(opt_dataset)
- # loads all CSV files, may take a while
- log.info(f'loading dataset...')
- dataset.load(opt_data_store)
+ # init dataset
+ dataset = Dataset(opt_data_store, opt_dataset)
+ #dataset.load_face_vectors()
+ dataset.load_records()
+ dataset.load_identities()
+ # set data store and load files
# find image records
- image_record = dataset.roi_idx_to_record(opt_index)
- # debug
+ image_record = dataset.index_to_record(opt_index)
image_record.summarize()
# load image
- fp_im = image_record.filepath
- im = cv.imread(fp_im)
+ im = cv.imread(image_record.filepath)
# display
cv.imshow('', im)
# cv gui
diff --git a/megapixels/commands/datasets/records.py b/megapixels/commands/datasets/records.py
new file mode 100644
index 00000000..80de5040
--- /dev/null
+++ b/megapixels/commands/datasets/records.py
@@ -0,0 +1,159 @@
+'''
+
+'''
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+identity_sources = ['subdir', 'subdir_head', 'subdir_tail']
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-t', '--threads', 'opt_threads', default=12,
+ help='Number of threads')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('--identity', 'opt_identity', default=None, type=click.Choice(identity_sources),
+ help='Identity source, blank for no identity')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+ help='Use glob recursion (slower)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media, opt_slice, opt_threads,
+ opt_identity, opt_force, opt_recursive):
+ """Generates sha256, uuid, and identity index CSV file"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+ from multiprocessing.dummy import Pool as ThreadPool
+ import random
+ import uuid
+
+ import pandas as pd
+ from tqdm import tqdm
+ from glob import glob
+
+ from app.models.data_store import DataStore
+ from app.utils import file_utils, im_utils
+
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # ----------------------------------------------------------------
+ # glob files
+
+ fp_in = opt_fp_in if opt_fp_in is not None else data_store.media_images_original()
+ log.info(f'Globbing {fp_in}')
+ fp_ims = file_utils.glob_multi(fp_in, ['jpg', 'png'], recursive=opt_recursive)
+ # fail if none
+ if not fp_ims:
+ log.error('No images. Try with "--recursive"')
+ return
+ # slice to reduce
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ log.info('Found {:,} images'.format(len(fp_ims)))
+
+
+ # ----------------------------------------------------------------
+ # multithread process into SHA256
+
+ # hash files in a thread pool, updating a shared progress bar
+ pbar = tqdm(total=len(fp_ims))
+
+ def as_sha256(fp_im):
+ pbar.update(1)
+ return file_utils.sha256(fp_im)
+
+ pool = ThreadPool(opt_threads)
+ sha256s = pool.map(as_sha256, fp_ims)
+ pbar.close()
+
+
+ # ----------------------------------------------------------------
+ # convert data to dict
+
+ data = []
+ for sha256, fp_im in zip(sha256s, fp_ims):
+ fpp_im = Path(fp_im)
+ subdir = str(fpp_im.parent.relative_to(fp_in))
+
+ if opt_identity:
+ subdirs = subdir.split('/')
+ if not len(subdirs) > 0:
+ log.error(f'Could not split subdir: "{subdir}". Try a different option for "--identity"')
+ log.error('exiting')
+ return
+ if opt_identity == 'subdir':
+ identity = subdirs[0] # use first/only part
+ elif opt_identity == 'subdir_head':
+ identity = subdirs[0] # use first part of subdir path
+ elif opt_identity == 'subdir_tail':
+ identity = subdirs[-1] # use last part of subdir path
+ else:
+ identity = ''
+
+ data.append({
+ 'subdir': subdir,
+ 'fn': fpp_im.stem,
+ 'ext': fpp_im.suffix.replace('.',''),
+ 'sha256': sha256,
+ 'uuid': uuid.uuid4(),
+ 'identity_key': identity
+ })
+
+ log.info(f'adding identity index using: "{opt_identity}". This may take a while...')
+ # convert dict to DataFrame
+ df_records = pd.DataFrame.from_dict(data)
+ # sort based on identity_key
+ df_records = df_records.sort_values(by=['identity_key'], ascending=True)
+ # add new column for identity
+ df_records['identity_index'] = [-1] * len(df_records)
+ # populate the identity_index
+ df_records_identity_groups = df_records.groupby('identity_key')
+ # enumerate groups to create identity indices
+ for identity_index, df_records_identity_group_tuple in enumerate(df_records_identity_groups):
+ identity_key, df_records_identity_group = df_records_identity_group_tuple
+ for ds_record in df_records_identity_group.itertuples():
+ df_records.at[ds_record.Index, 'identity_index'] = identity_index
+ # reset index after being sorted
+ df_records = df_records.reset_index(drop=True)
+ df_records.index.name = 'index' # reassign 'index' as primary key column
+ # write to CSV
+ file_utils.mkdirs(fp_out)
+ df_records.to_csv(fp_out)
+ # done
+ log.info(f'wrote rows: {len(df_records)} to {fp_out}') \ No newline at end of file
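The groupby loop that backfills identity_index is equivalent to pandas' ngroup(); a sketch of the same assignment:

  # number identity groups in sorted key order, one label per row;
  # equivalent to the enumerate-over-groups loop above
  df_records['identity_index'] = df_records.groupby('identity_key', sort=True).ngroup()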
diff --git a/megapixels/commands/datasets/s3.py b/megapixels/commands/datasets/s3.py
deleted file mode 100644
index 7769896b..00000000
--- a/megapixels/commands/datasets/s3.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-
-s3_dirs = {'media': cfg.S3_MEDIA_ROOT, 'metadata': cfg.S3_METADATA_ROOT}
-
-@click.command()
-@click.option('-i', '--input', 'opt_fps_in', required=True, multiple=True,
- help='Input directory')
-@click.option('--name', 'opt_dataset_name', required=True,
- help='Dataset key (eg "lfw"')
-@click.option('-a', '--action', 'opt_action', type=click.Choice(['sync', 'put']), default='sync',
- help='S3 action')
-@click.option('-t', '--type', 'opt_type', type=click.Choice(s3_dirs.keys()), required=True,
- help='S3 location')
-@click.option('--dry-run', 'opt_dryrun', is_flag=True, default=False)
-@click.pass_context
-def cli(ctx, opt_fps_in, opt_dataset_name, opt_action, opt_type, opt_dryrun):
- """Syncs files with S3/spaces server"""
-
- from os.path import join
- from pathlib import Path
-
- from tqdm import tqdm
- import pandas as pd
- import subprocess
-
- from app.utils import logger_utils, file_utils
-
- # -------------------------------------------------
- # init here
-
- log = logger_utils.Logger.getLogger()
- for opt_fp_in in opt_fps_in:
- dir_dst = join(s3_dirs[opt_type], opt_dataset_name, '')
- if Path(opt_fp_in).is_dir():
- fp_src = join(opt_fp_in, '') # add trailing slashes
- else:
- fp_src = join(opt_fp_in)
- cmd = ['s3cmd', opt_action, fp_src, dir_dst, '-P', '--follow-symlinks']
- log.info(' '.join(cmd))
- if not opt_dryrun:
- subprocess.call(cmd)
-
- \ No newline at end of file
diff --git a/megapixels/commands/datasets/s3_sync.py b/megapixels/commands/datasets/s3_sync.py
new file mode 100644
index 00000000..3098d9be
--- /dev/null
+++ b/megapixels/commands/datasets/s3_sync.py
@@ -0,0 +1,57 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+s3_dirs = {'media': cfg.S3_MEDIA_URL, 'metadata': cfg.S3_METADATA_URL}
+
+@click.command()
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-t', '--type', 'opt_type', type=click.Choice(s3_dirs.keys()), required=True,
+ help='S3 location')
+@click.option('--dry-run', 'opt_dryrun', is_flag=True, default=False)
+@click.pass_context
+def cli(ctx, opt_data_store, opt_dataset, opt_type, opt_dryrun):
+ """Syncs files with S3/spaces server"""
+
+ from os.path import join
+ from pathlib import Path
+
+ from tqdm import tqdm
+ import pandas as pd
+ import subprocess
+
+ from app.utils import logger_utils, file_utils
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ dataset_name = opt_dataset.name.lower()
+ if opt_type == 'media':
+ dir_src = join(data_store.uuid_dir(), '')
+ dir_dst = join(s3_dirs[opt_type], dataset_name, '')
+ elif opt_type == 'metadata':
+ dir_src = join(data_store.metadata_dir(), '')
+ dir_dst = join(s3_dirs[opt_type], dataset_name, '')
+
+ cmd = ['s3cmd', 'sync', dir_src, dir_dst, '-P', '--follow-symlinks']
+ log.info(' '.join(cmd))
+ if not opt_dryrun:
+ subprocess.call(cmd)
+
+ \ No newline at end of file
diff --git a/megapixels/commands/datasets/sha256.py b/megapixels/commands/datasets/sha256.py
deleted file mode 100644
index 4c734073..00000000
--- a/megapixels/commands/datasets/sha256.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-from app.utils.logger_utils import Logger
-
-log = Logger.getLogger()
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input directory')
-@click.option('-m', '--media', 'opt_dir_media', required=True,
- help='Input media directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output directory')
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
- help='Slice list of files')
-@click.option('-t', '--threads', 'opt_threads', default=4,
- help='Number of threads')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_slice, opt_threads, opt_force):
- """Multithreading test"""
-
- from glob import glob
- from os.path import join
- from pathlib import Path
- import time
- from multiprocessing.dummy import Pool as ThreadPool
- import random
-
- import pandas as pd
- from tqdm import tqdm
- from glob import glob
-
- from app.utils import file_utils, im_utils
-
-
- if not opt_force and Path(opt_fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
-
- df_files = pd.read_csv(opt_fp_in).set_index('index')
-
- if opt_slice:
- df_files = df_files[opt_slice[0]:opt_slice[1]]
-
- log.info('Processing {:,} images'.format(len(df_files)))
-
-
- # prepare list of images to multithread into sha256s
- file_objs = []
- for ds_file in df_files.itertuples():
- fp_im = join(opt_dir_media, str(ds_file.subdir), f"{ds_file.fn}.{ds_file.ext}")
- file_objs.append({'fp': fp_im, 'index': ds_file.Index})
-
- # convert to thread pool
- pbar = tqdm(total=len(file_objs))
-
- def as_sha256(file_obj):
- pbar.update(1)
- file_obj['sha256'] = file_utils.sha256(file_obj['fp'])
- return file_obj
-
- # multithread pool
- pool_file_objs = []
- st = time.time()
- pool = ThreadPool(opt_threads)
- with tqdm(total=len(file_objs)) as pbar:
- pool_file_objs = pool.map(as_sha256, file_objs)
- pbar.close()
-
- # convert data to dict
- data = []
- for pool_file_obj in pool_file_objs:
- data.append( {
- 'sha256': pool_file_obj['sha256'],
- 'index': pool_file_obj['index']
- })
-
- # save to CSV
- file_utils.mkdirs(opt_fp_out)
- df = pd.DataFrame.from_dict(data)
- df.to_csv(opt_fp_out, index=False)
-
- # timing
- log.info('time: {:.2f}, theads: {}'.format(time.time() - st, opt_threads)) \ No newline at end of file
diff --git a/megapixels/commands/datasets/symlink.py b/megapixels/commands/datasets/symlink.py
deleted file mode 100644
index 70ec6c46..00000000
--- a/megapixels/commands/datasets/symlink.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input records CSV')
-@click.option('-m', '--media', 'opt_fp_media', required=True,
- help='Input media directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output directory')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_media, opt_fp_out):
- """Symlinks images to new directory for S3"""
-
- import sys
- import os
- from os.path import join
- from pathlib import Path
-
- from tqdm import tqdm
- import pandas as pd
-
- from app.utils import logger_utils, file_utils
-
- # -------------------------------------------------
- # init here
-
- log = logger_utils.Logger.getLogger()
-
- df_records = pd.read_csv(opt_fp_in)
- nrows = len(df_records)
-
- file_utils.mkdirs(opt_fp_out)
-
- for record_id, row in tqdm(df_records.iterrows(), total=nrows):
- # make image path
- df = df_records.iloc[record_id]
- fpp_src = Path(join(opt_fp_media, df['subdir'], '{}.{}'.format(df['fn'], df['ext'])))
- fpp_dst = Path(join(opt_fp_out, '{}.{}'.format(df['uuid'], df['ext'])))
- fpp_dst.symlink_to(fpp_src)
-
- log.info('symlinked {:,} files'.format(nrows)) \ No newline at end of file
diff --git a/megapixels/commands/datasets/symlink_uuid.py b/megapixels/commands/datasets/symlink_uuid.py
new file mode 100644
index 00000000..7c5faa95
--- /dev/null
+++ b/megapixels/commands/datasets/symlink_uuid.py
@@ -0,0 +1,57 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset):
+ """Symlinks images to new directory for S3"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+
+ from tqdm import tqdm
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_records = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_records = pd.read_csv(fp_records).set_index('index')
+ nrows = len(df_records)
+
+ dir_out = data_store.uuid_dir() if opt_fp_out is None else opt_fp_out
+ file_utils.mkdirs(dir_out)
+
+ for ds_record in tqdm(df_records.itertuples(), total=nrows):
+ # make image path
+ fp_src = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ fp_dst = data_store.face_uuid(ds_record.uuid, ds_record.ext)
+ Path(fp_dst).symlink_to(Path(fp_src))
+
+ log.info('symlinked {:,} files'.format(nrows)) \ No newline at end of file
diff --git a/megapixels/commands/demo/face_analysis.py b/megapixels/commands/demo/face_analysis.py
new file mode 100644
index 00000000..6721a02d
--- /dev/null
+++ b/megapixels/commands/demo/face_analysis.py
@@ -0,0 +1,56 @@
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+@click.command()
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.NAS),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--index', 'opt_index', type=int, required=True,
+ help='Record index to display')
+@click.pass_context
+def cli(ctx, opt_index, opt_data_store, opt_dataset):
+ """Display image info"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils, path_utils
+
+ log = Logger.getLogger()
+
+ dataset = Dataset(opt_dataset).load(opt_data_store)
+ # find image records
+ image_record = dataset.roi_idx_to_record(opt_index)
+ # debug
+ image_record.summarize()
+ # load image
+ fp_im = image_record.filepath
+ im = cv.imread(fp_im)
+ # display
+ cv.imshow('', im)
+ # cv gui
+ while True:
+ k = cv.waitKey(1) & 0xFF
+ if k == 27 or k == ord('q'): # ESC
+ cv.destroyAllWindows()
+ sys.exit()
+ elif k != 255:
+ # any key to continue
+ break \ No newline at end of file
diff --git a/megapixels/commands/demo/face_search.py b/megapixels/commands/demo/face_search.py
new file mode 100644
index 00000000..0452cc9d
--- /dev/null
+++ b/megapixels/commands/demo/face_search.py
@@ -0,0 +1,94 @@
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Input face image')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--gpu', 'opt_gpu', default=0,
+ help='GPU index (use -1 for CPU)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_gpu):
+ """Display image info"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import imutils
+ import pandas as pd
+ import cv2 as cv
+ import dlib
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore, DataStoreS3
+ from app.processors import face_detector
+ from app.processors import face_recognition
+
+ log = Logger.getLogger()
+
+ # init face detection
+ detector = face_detector.DetectorDLIBHOG()
+
+ # init face recognition
+ recognition = face_recognition.RecognitionDLIB(gpu=opt_gpu)
+
+ # load query image
+ im_query = cv.imread(opt_fp_in)
+ # get detection as BBox object
+ bboxes = detector.detect(im_query, largest=True)
+ if not bboxes:
+ log.error('No face detected. Exiting')
+ return
+
+ # use the largest (only) detection and scale it to image dimensions
+ bbox = bboxes[0]
+ dim = im_query.shape[:2][::-1]
+ bbox = bbox.to_dim(dim) # convert back to real dimensions
+
+ # extract the face vectors
+ vec_query = recognition.vec(im_query, bbox)
+
+ # load dataset CSVs
+ dataset = Dataset(opt_data_store, opt_dataset)
+
+ # find matches
+ image_records = dataset.find_matches(vec_query, n_results=5)
+
+ # summary
+ ims_match = [im_query]
+ for image_record in image_records:
+ image_record.summarize()
+ log.info(f'{image_record.filepath}')
+ im_match = cv.imread(image_record.filepath)
+ ims_match.append(im_match)
+
+ montages = imutils.build_montages(ims_match, (256, 256), (3,2))
+
+ for i, montage in enumerate(montages):
+ cv.imshow(f'{i}', montage)
+ # cv gui
+ while True:
+ k = cv.waitKey(1) & 0xFF
+ if k == 27 or k == ord('q'): # ESC
+ cv.destroyAllWindows()
+ sys.exit()
+ elif k != 255:
+ # any key to continue
+ break \ No newline at end of file
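Dataset.find_matches is internal to the repo; a typical implementation ranks stored 128-D dlib embeddings by Euclidean distance to the query vector. A sketch assuming the vectors are rows of a NumPy array:

  import numpy as np

  def find_matches(vec_query, vecs, n_results=5):
    """Return indices of the n_results nearest face vectors."""
    # dlib embeddings are 128-D; smaller distance = more similar face
    dists = np.linalg.norm(vecs - np.asarray(vec_query), axis=1)
    return np.argsort(dists)[:n_results]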