author    adamhrv <adam@ahprojects.com>  2018-12-17 01:37:31 +0100
committer adamhrv <adam@ahprojects.com>  2018-12-17 01:37:31 +0100
commit    88ec48e1c4d93ba9cd3aa186c068ef2aa4c27c56 (patch)
tree      506075c0c8f0d4bbf15e97c6db50b6e055c5bd4e /megapixels/commands
parent    23e9fef5dce8b0b15dd94713816b9d7d45f12356 (diff)
fixing dataset processors
Diffstat (limited to 'megapixels/commands')
 megapixels/commands/cv/cluster.py                                                            |  22
 megapixels/commands/cv/face_pose.py (renamed from megapixels/commands/cv/gen_pose.py)        |  17
 megapixels/commands/cv/face_roi.py (renamed from megapixels/commands/cv/gen_rois.py)         |  17
 megapixels/commands/cv/face_vector.py (renamed from megapixels/commands/cv/gen_face_vec.py)  |  18
 megapixels/commands/datasets/filter_by_pose.py                                               |  41
 megapixels/commands/datasets/gen_filepath.py                                                 |   4
 megapixels/commands/datasets/gen_sha256.py                                                   | 152
 megapixels/commands/datasets/gen_uuid.py                                                     |   2
 megapixels/commands/datasets/identity_meta_lfw.py                                            |  93
 megapixels/commands/datasets/identity_meta_vgg_face2.py                                      |  88
 megapixels/commands/datasets/lookup.py                                                       |   9
 megapixels/commands/datasets/records.py                                                      | 159
 megapixels/commands/datasets/s3.py                                                           |  47
 megapixels/commands/datasets/s3_sync.py                                                      |  57
 megapixels/commands/datasets/symlink.py                                                      |  45
 megapixels/commands/datasets/symlink_uuid.py                                                 |  57
 megapixels/commands/demo/face_search.py                                                      |   3
 17 files changed, 518 insertions(+), 313 deletions(-)
diff --git a/megapixels/commands/cv/cluster.py b/megapixels/commands/cv/cluster.py
index 94334133..419091a0 100644
--- a/megapixels/commands/cv/cluster.py
+++ b/megapixels/commands/cv/cluster.py
@@ -23,20 +23,20 @@ from app.utils.logger_utils import Logger
@click.pass_context
def cli(ctx, opt_data_store, opt_dataset, opt_metadata):
"""Display image info"""
-
- # cluster the embeddings
-print("[INFO] clustering...")
-clt = DBSCAN(metric="euclidean", n_jobs=args["jobs"])
-clt.fit(encodings)
-
-# determine the total number of unique faces found in the dataset
-labelIDs = np.unique(clt.labels_)
-numUniqueFaces = len(np.where(labelIDs > -1)[0])
-print("[INFO] # unique faces: {}".format(numUniqueFaces))
+
+ # cluster the embeddings
+ print("[INFO] clustering...")
+ clt = DBSCAN(metric="euclidean", n_jobs=args["jobs"])
+ clt.fit(encodings)
+
+ # determine the total number of unique faces found in the dataset
+ labelIDs = np.unique(clt.labels_)
+ numUniqueFaces = len(np.where(labelIDs > -1)[0])
+ print("[INFO] # unique faces: {}".format(numUniqueFaces))
# load and display image
im = cv.imread(fp_im)
cv.imshow('', im)
-
+
while True:
k = cv.waitKey(1) & 0xFF
if k == 27 or k == ord('q'): # ESC
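
Aside: the block this hunk re-indents clusters the face encodings with DBSCAN (presumably scikit-learn's, given the n_jobs argument). A minimal, self-contained sketch of that step, using synthetic 128-D encodings in place of the real face vectors; the eps and min_samples values below are illustrative assumptions, not taken from the repo:

    import numpy as np
    from sklearn.cluster import DBSCAN

    # synthetic stand-in for the 128-D face encodings the command loads
    rng = np.random.default_rng(0)
    encodings = rng.normal(size=(200, 128))

    # cluster the embeddings
    clt = DBSCAN(metric="euclidean", eps=0.5, min_samples=5, n_jobs=-1)
    clt.fit(encodings)

    # DBSCAN marks noise as -1, so unique faces are the non-negative labels
    label_ids = np.unique(clt.labels_)
    num_unique_faces = len(np.where(label_ids > -1)[0])
    print("[INFO] # unique faces: {}".format(num_unique_faces))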
diff --git a/megapixels/commands/cv/gen_pose.py b/megapixels/commands/cv/face_pose.py
index aefadb00..e7ffb7ac 100644
--- a/megapixels/commands/cv/gen_pose.py
+++ b/megapixels/commands/cv/face_pose.py
@@ -76,27 +76,26 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
face_landmarks = LandmarksDLIB()
# load filepath data
- fp_filepath = data_store.metadata(types.Metadata.FILEPATH)
- df_filepath = pd.read_csv(fp_filepath)
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
# load ROI data
fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
- df_roi = pd.read_csv(fp_roi)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
# slice if you want
if opt_slice:
df_roi = df_roi[opt_slice[0]:opt_slice[1]]
# group by image index (speedup if multiple faces per image)
- df_img_groups = df_roi.groupby('image_index')
+ df_img_groups = df_roi.groupby('record_index')
log.debug('processing {:,} groups'.format(len(df_img_groups)))
# store poses and convert to DataFrame
poses = []
# iterate
- for image_index, df_img_group in tqdm(df_img_groups):
+ for record_index, df_img_group in tqdm(df_img_groups):
# make fp
- ds_file = df_filepath.iloc[image_index]
- fp_im = data_store.face_image(ds_file.subdir, ds_file.fn, ds_file.ext)
- #fp_im = join(opt_dir_media, ds_file.subdir, '{}.{}'.format(ds_file.fn, ds_file.ext))
+ ds_record = df_record.iloc[record_index]
+ fp_im = data_store.face_image(ds_record.subdir, ds_record.fn, ds_record.ext)
im = cv.imread(fp_im)
# get bbox
x = df_img_group.x.values[0]
@@ -130,7 +129,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
break
# add image index and append to result CSV data
- pose_degrees['image_index'] = image_index
+ pose_degrees['record_index'] = record_index
poses.append(pose_degrees)
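
The rename from image_index to record_index also changes how ROIs are grouped and joined back to the file-record table. A toy sketch of that groupby/lookup pattern, assuming invented values (only the column names follow the diff):

    import pandas as pd

    df_record = pd.DataFrame({'subdir': ['a', 'b'], 'fn': ['img0', 'img1'], 'ext': ['jpg', 'jpg']})
    df_record.index.name = 'index'

    df_roi = pd.DataFrame({'record_index': [0, 0, 1],
                           'x': [10, 40, 5], 'y': [10, 12, 8],
                           'w': [32, 32, 48], 'h': [32, 32, 48]})
    df_roi.index.name = 'index'

    # one group per source image, even when an image contains several face ROIs
    for record_index, df_img_group in df_roi.groupby('record_index'):
        ds_record = df_record.iloc[record_index]  # positional lookup into the record table
        fp_im = '{}/{}.{}'.format(ds_record.subdir, ds_record.fn, ds_record.ext)
        print(fp_im, len(df_img_group), 'face(s)')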
diff --git a/megapixels/commands/cv/gen_rois.py b/megapixels/commands/cv/face_roi.py
index 20dd598a..d7248aee 100644
--- a/megapixels/commands/cv/gen_rois.py
+++ b/megapixels/commands/cv/face_roi.py
@@ -103,20 +103,19 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
# get list of files to process
- fp_in = data_store.metadata(types.Metadata.FILEPATH) if opt_fp_in is None else opt_fp_in
- df_files = pd.read_csv(fp_in).set_index('index')
+ fp_in = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
+ df_records = pd.read_csv(fp_in).set_index('index')
if opt_slice:
- df_files = df_files[opt_slice[0]:opt_slice[1]]
- log.debug('processing {:,} files'.format(len(df_files)))
+ df_records = df_records[opt_slice[0]:opt_slice[1]]
+ log.debug('processing {:,} files'.format(len(df_records)))
# filter out grayscale
color_filter = color_filters[opt_color_filter]
data = []
- for df_file in tqdm(df_files.itertuples(), total=len(df_files)):
- fp_im = data_store.face_image(str(df_file.subdir), str(df_file.fn), str(df_file.ext))
- #fp_im = join(opt_dir_media, str(df_file.subdir), f'{df_file.fn}.{df_file.ext}')
+ for df_record in tqdm(df_records.itertuples(), total=len(df_records)):
+ fp_im = data_store.face_image(str(df_record.subdir), str(df_record.fn), str(df_record.ext))
im = cv.imread(fp_im)
# filter out color or grayscale iamges
@@ -139,7 +138,7 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
for bbox in bboxes:
roi = {
- 'image_index': int(df_file.Index),
+ 'record_index': int(df_record.Index),
'x': bbox.x,
'y': bbox.y,
'w': bbox.w,
@@ -169,4 +168,4 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
file_utils.mkdirs(fp_out)
df = pd.DataFrame.from_dict(data)
df.index.name = 'index'
- df.to_csv(opt_fp_out) \ No newline at end of file
+ df.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/cv/gen_face_vec.py b/megapixels/commands/cv/face_vector.py
index 83e1460d..203f73eb 100644
--- a/megapixels/commands/cv/gen_face_vec.py
+++ b/megapixels/commands/cv/face_vector.py
@@ -76,15 +76,17 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
facerec = face_recognition.RecognitionDLIB()
# load data
- df_file = pd.read_csv(data_store.metadata(types.Metadata.FILEPATH)).set_index('index')
- df_roi = pd.read_csv(data_store.metadata(types.Metadata.FACE_ROI)).set_index('index')
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
if opt_slice:
df_roi = df_roi[opt_slice[0]:opt_slice[1]]
# -------------------------------------------------
# process here
- df_img_groups = df_roi.groupby('image_index')
+ df_img_groups = df_roi.groupby('record_index')
log.debug('processing {:,} groups'.format(len(df_img_groups)))
vecs = []
@@ -92,9 +94,9 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
for image_index, df_img_group in tqdm(df_img_groups):
# make fp
roi_index = df_img_group.index.values[0]
- log.debug(f'roi_index: {roi_index}, image_index: {image_index}')
- ds_file = df_file.loc[roi_index] # locate image meta
- #ds_file = df_file.loc['index', image_index] # locate image meta
+ # log.debug(f'roi_index: {roi_index}, image_index: {image_index}')
+ ds_file = df_record.loc[roi_index] # locate image meta
+ #ds_file = df_record.loc['index', image_index] # locate image meta
fp_im = data_store.face_image(str(ds_file.subdir), str(ds_file.fn), str(ds_file.ext))
im = cv.imread(fp_im)
@@ -119,5 +121,5 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
# save date
df = pd.DataFrame.from_dict(vecs)
df.index.name = 'index'
- #file_utils.mkdirs(fp_out)
- #df.to_csv(fp_out) \ No newline at end of file
+ file_utils.mkdirs(fp_out)
+ df.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/datasets/filter_by_pose.py b/megapixels/commands/datasets/filter_by_pose.py
index 6fdbef98..a588b18e 100644
--- a/megapixels/commands/datasets/filter_by_pose.py
+++ b/megapixels/commands/datasets/filter_by_pose.py
@@ -53,17 +53,11 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_yaw, opt_ro
fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
df_roi = pd.read_csv(fp_roi).set_index('index')
# load filepath
- fp_filepath = data_store.metadata(types.Metadata.FILEPATH)
- df_filepath = pd.read_csv(fp_filepath).set_index('index')
- # load uuid
- fp_uuid= data_store.metadata(types.Metadata.UUID)
- df_uuid = pd.read_csv(fp_uuid).set_index('index')
- # load sha256 index
- fp_sha256 = data_store.metadata(types.Metadata.SHA256)
- df_sha256 = pd.read_csv(fp_sha256).set_index('index')
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
# debug
log.info('Processing {:,} rows'.format(len(df_pose)))
- n_rows = len(df_pose)
+ n_rows = len(df_record)
# filter out extreme poses
invalid_indices = []
@@ -74,28 +68,29 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_yaw, opt_ro
invalid_indices.append(ds_pose.Index) # unique file indexs
# filter out valid/invalid
- log.info(invalid_indices[:20])
+ log.info(f'indices 0-20: {invalid_indices[:20]}')
log.info(f'Removing {len(invalid_indices)} invalid indices...')
- df_filepath = df_filepath.drop(df_pose.index[invalid_indices])
- df_sha256 = df_sha256.drop(df_pose.index[invalid_indices])
- df_uuid = df_uuid.drop(df_pose.index[invalid_indices])
- df_roi = df_roi.drop(df_pose.index[invalid_indices])
+ df_record = df_record.drop(df_record.index[invalid_indices])
+ df_roi = df_roi.drop(df_roi.index[invalid_indices])
df_pose = df_pose.drop(df_pose.index[invalid_indices])
- log.info(f'Removed {n_rows - len(df_pose)}')
+ log.info(f'Removed {n_rows - len(df_record)}')
# move file to make backup
dir_bkup = join(Path(fp_pose).parent, f'backup_{datetime.now():%Y%m%d_%M%S}')
file_utils.mkdirs(dir_bkup)
# move files to backup
- shutil.move(fp_filepath, join(dir_bkup, Path(fp_filepath).name))
- shutil.move(fp_sha256, join(dir_bkup, Path(fp_sha256).name))
- shutil.move(fp_uuid, join(dir_bkup, Path(fp_uuid).name))
+ shutil.move(fp_record, join(dir_bkup, Path(fp_record).name))
shutil.move(fp_roi, join(dir_bkup, Path(fp_roi).name))
shutil.move(fp_pose, join(dir_bkup, Path(fp_pose).name))
- # save filtered poses
- df_filepath.to_csv(fp_filepath)
- df_sha256.to_csv(fp_sha256)
- df_uuid.to_csv(fp_uuid)
+ # resave file records
+ df_record = df_record.reset_index(drop=True)
+ df_record.index.name = 'index'
+ df_record.to_csv(fp_record)
+ # resave ROI
+ df_roi = df_roi.reset_index(drop=True)
+ df_roi.index.name = 'index'
df_roi.to_csv(fp_roi)
+ # resave pose
+ df_pose = df_pose.reset_index(drop=True)
+ df_pose.index.name = 'index'
df_pose.to_csv(fp_pose)
-
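
The net effect of this hunk is a drop-and-reindex pass over every metadata table. A toy sketch of that pattern, assuming a single yaw threshold for brevity (the real command also checks roll and pitch):

    import pandas as pd

    df_pose = pd.DataFrame({'record_index': [0, 1, 2], 'yaw': [5.0, 80.0, -10.0]})
    df_record = pd.DataFrame({'fn': ['img0', 'img1', 'img2']})

    opt_yaw = 45.0  # illustrative threshold
    invalid_indices = [ds.Index for ds in df_pose.itertuples() if abs(ds.yaw) > opt_yaw]

    # drop the flagged rows from each table, then rebuild a dense 'index' key
    df_record = df_record.drop(df_record.index[invalid_indices])
    df_pose = df_pose.drop(df_pose.index[invalid_indices])
    for df in (df_record, df_pose):
        df.reset_index(drop=True, inplace=True)
        df.index.name = 'index'

    print(df_record)  # img1 (yaw 80) is gone from both tables
    print(df_pose)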
diff --git a/megapixels/commands/datasets/gen_filepath.py b/megapixels/commands/datasets/gen_filepath.py
index e06fee6b..5db405c0 100644
--- a/megapixels/commands/datasets/gen_filepath.py
+++ b/megapixels/commands/datasets/gen_filepath.py
@@ -50,7 +50,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_slice,
from tqdm import tqdm
from glob import glob
- from app.models import DataStore
+ from app.models.data_store import DataStore
from app.utils import file_utils, im_utils
data_store = DataStore(opt_data_store, opt_dataset)
@@ -97,6 +97,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_slice,
file_utils.mkdirs(fp_out)
df_filepath = pd.DataFrame.from_dict(data)
df_filepath = df_filepath.sort_values(by=['subdir'], ascending=True)
- df_filepath = df_filepath.reset_index(drop=True)
+ df_filepath = df_filepath.reset_index()
df_filepath.index.name = 'index'
df_filepath.to_csv(fp_out) \ No newline at end of file
diff --git a/megapixels/commands/datasets/gen_sha256.py b/megapixels/commands/datasets/gen_sha256.py
deleted file mode 100644
index 1616eebf..00000000
--- a/megapixels/commands/datasets/gen_sha256.py
+++ /dev/null
@@ -1,152 +0,0 @@
-'''
-
-'''
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-from app.utils.logger_utils import Logger
-
-log = Logger.getLogger()
-
-identity_sources = ['subdir', 'subdir_head', 'subdir_tail']
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', default=None,
- help='Override enum input filename CSV')
-@click.option('-o', '--output', 'opt_fp_out', default=None,
- help='Override enum output filename CSV')
-@click.option('-m', '--media', 'opt_dir_media', default=None,
- help='Override enum media directory')
-@click.option('--data_store', 'opt_data_store',
- type=cfg.DataStoreVar,
- default=click_utils.get_default(types.DataStore.NAS),
- show_default=True,
- help=click_utils.show_help(types.Dataset))
-@click.option('--dataset', 'opt_dataset',
- type=cfg.DatasetVar,
- required=True,
- show_default=True,
- help=click_utils.show_help(types.Dataset))
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
- help='Slice list of files')
-@click.option('-t', '--threads', 'opt_threads', default=12,
- help='Number of threads')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.option('--identity', 'opt_identity', default='subdir_tail', type=click.Choice(identity_sources),
- help='Identity source, blank for no identity')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media, opt_slice, opt_threads,
- opt_identity, opt_force):
- """Generates sha256/identity index CSV file"""
-
- import sys
- from glob import glob
- from os.path import join
- from pathlib import Path
- import time
- from multiprocessing.dummy import Pool as ThreadPool
- import random
-
- import pandas as pd
- from tqdm import tqdm
- from glob import glob
-
- from app.models import DataStore
- from app.utils import file_utils, im_utils
-
-
- # set data_store
- data_store = DataStore(opt_data_store, opt_dataset)
- # get filepath out
- fp_out = data_store.metadata(types.Metadata.SHA256) if opt_fp_out is None else opt_fp_out
- # exit if exists
- if not opt_force and Path(fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
- # get filepath in
- fp_in = data_store.metadata(types.Metadata.FILEPATH)
- df_files = pd.read_csv(fp_in).set_index('index')
- # slice if you want
- if opt_slice:
- df_files = df_files[opt_slice[0]:opt_slice[1]]
-
- log.info('Processing {:,} images'.format(len(df_files)))
-
-
- # prepare list of images to multithread into sha256s
- dir_media = data_store.media_images_original() if opt_dir_media is None else opt_dir_media
- file_objs = []
- for ds_file in df_files.itertuples():
- fp_im = join(dir_media, str(ds_file.subdir), f"{ds_file.fn}.{ds_file.ext}")
- # find the image_index
- # append the subdir option, sort by this then increment by unique subdir
- file_obj = {'fp': fp_im, 'index': ds_file.Index}
- if opt_identity:
- subdirs = ds_file.subdir.split('/')
- if not len(subdirs) > 0:
- log.error(f'Could not split subdir: "{ds_file.subdir}. Try different option for "--identity"')
- log.error('exiting')
- return
- if opt_identity == 'subdir':
- subdir = subdirs[0]
- elif opt_identity == 'subdir_head':
- # use first part of subdir path
- subdir = subdirs[0]
- elif opt_identity == 'subdir_tail':
- # use last part of subdir path
- subdir = subdirs[-1]
- file_obj['identity_subdir'] = subdir
- file_objs.append(file_obj)
-
- # convert to thread pool
- pbar = tqdm(total=len(file_objs))
-
- def as_sha256(file_obj):
- pbar.update(1)
- file_obj['sha256'] = file_utils.sha256(file_obj['fp'])
- return file_obj
-
- # multithread pool
- pool_file_objs = []
- st = time.time()
- pool = ThreadPool(opt_threads)
- with tqdm(total=len(file_objs)) as pbar:
- pool_file_objs = pool.map(as_sha256, file_objs)
- pbar.close()
-
- # convert data to dict
- data = []
- for pool_file_obj in pool_file_objs:
- data.append( {
- 'sha256': pool_file_obj['sha256'],
- 'index': pool_file_obj['index'],
- 'identity_subdir': pool_file_obj.get('identity_subdir', ''),
- })
-
- # sort based on identity_subdir
- # save to CSV
- df_sha256 = pd.DataFrame.from_dict(data)
- # add new column for identity
- df_sha256['identity_index'] = [1] * len(df_sha256)
- df_sha256 = df_sha256.sort_values(by=['identity_subdir'], ascending=True)
- df_sha256_identity_groups = df_sha256.groupby('identity_subdir')
- for identity_index, df_sha256_identity_group_tuple in enumerate(df_sha256_identity_groups):
- identity_subdir, df_sha256_identity_group = df_sha256_identity_group_tuple
- for ds_sha256 in df_sha256_identity_group.itertuples():
- df_sha256.at[ds_sha256.Index, 'identity_index'] = identity_index
- # drop temp identity subdir column
- df_sha256 = df_sha256.drop('identity_subdir', axis=1)
- # write to CSV
- log.info(f'rows: {len(df_sha256)}')
- file_utils.mkdirs(fp_out)
- df_sha256.set_index('index')
- df_sha256 = df_sha256.sort_values(['index'], ascending=[True])
- df_sha256.to_csv(fp_out, index=False)
-
- # timing
- log.info(f'wrote file: {fp_out}')
- log.info('time: {:.2f}, theads: {}'.format(time.time() - st, opt_threads))
- \ No newline at end of file
diff --git a/megapixels/commands/datasets/gen_uuid.py b/megapixels/commands/datasets/gen_uuid.py
index 612c43ee..d7e7b52c 100644
--- a/megapixels/commands/datasets/gen_uuid.py
+++ b/megapixels/commands/datasets/gen_uuid.py
@@ -37,7 +37,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_force):
from tqdm import tqdm
import pandas as pd
- from app.models import DataStore
+ from app.models.data_store import DataStore
# set data_store
diff --git a/megapixels/commands/datasets/identity_meta_lfw.py b/megapixels/commands/datasets/identity_meta_lfw.py
new file mode 100644
index 00000000..45386b23
--- /dev/null
+++ b/megapixels/commands/datasets/identity_meta_lfw.py
@@ -0,0 +1,93 @@
+'''
+add identity from description using subdir
+'''
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Identity meta file')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--column', 'opt_identity_key', default='identity_key',
+ help='Match column')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_identity_key, opt_data_store, opt_force):
+ """Display image info"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ log = Logger.getLogger()
+
+ # output file
+ opt_dataset = types.Dataset.LFW
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_out = data_store.metadata(types.Metadata.IDENTITY) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ log.debug(fp_out)
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwite')
+ return
+
+ # init dataset
+ # load file records
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+
+ # load identity meta
+ # this file is maybe prepared in a Jupyter notebook
+ # the "identity_key"
+ df_identity_meta = pd.read_csv(opt_fp_in).set_index('index')
+ # create a new file called 'identity.csv'
+ identities = []
+ # iterate records and get identity index where 'identity_key' matches
+ log.debug(type(df_record))
+ identity_indices = []
+ for record_idx, ds_record in tqdm(df_record.iterrows(), total=len(df_record)):
+ identity_value = ds_record[opt_identity_key]
+ identity_index = ds_record.identity_index
+ ds_identity_meta = df_identity_meta.loc[(df_identity_meta[opt_identity_key] == identity_value)]
+ if identity_index not in identity_indices:
+ identity_indices.append(identity_index)
+ identities.append({
+ 'description': ds_identity_meta.description.values[0],
+ 'name': ds_identity_meta.name.values[0],
+ 'images': ds_identity_meta.images.values[0],
+ 'gender': ds_identity_meta.gender.values[0],
+ })
+
+ # write to csv
+ df_identity = pd.DataFrame.from_dict(identities)
+ df_identity.index.name = 'index'
+ df_identity.to_csv(fp_out)
+ '''
+ index,name,name_orig,description,gender,images,image_index,identity_key
+ 0,A. J. Cook,AJ Cook,Canadian actress,f,1,0,AJ_Cook
+ '''
+
+
diff --git a/megapixels/commands/datasets/identity_meta_vgg_face2.py b/megapixels/commands/datasets/identity_meta_vgg_face2.py
new file mode 100644
index 00000000..85b6644d
--- /dev/null
+++ b/megapixels/commands/datasets/identity_meta_vgg_face2.py
@@ -0,0 +1,88 @@
+'''
+add identity from description using subdir
+'''
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Identity meta file')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_force):
+ """Display image info"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ log = Logger.getLogger()
+
+ # output file
+ opt_dataset = types.Dataset.VGG_FACE2
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_out = data_store.metadata(types.Metadata.IDENTITY) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ log.debug(fp_out)
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwite')
+ return
+
+ # init dataset
+ # load file records
+ identity_key = 'identity_key'
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+
+ # load identity meta
+ # this file is maybe prepared in a Jupyter notebook
+ # the "identity_key"
+ df_identity_meta = pd.read_csv(opt_fp_in).set_index('index')
+ # create a new file called 'identity.csv'
+ identities = []
+ # iterate records and get identity index where 'identity_key' matches
+ log.debug(type(df_record))
+ identity_indices = []
+ for ds_record in tqdm(df_record.itertuples(), total=len(df_record)):
+ identity_value = ds_record.identity_key
+ identity_index = ds_record.identity_index
+ ds_identity_meta = df_identity_meta.loc[(df_identity_meta[identity_key] == identity_value)]
+ if identity_index not in identity_indices:
+ identity_indices.append(identity_index)
+ identities.append({
+ 'description': ds_identity_meta.description.values[0],
+ 'name': ds_identity_meta.name.values[0],
+ 'images': ds_identity_meta.images.values[0],
+ 'gender': ds_identity_meta.gender.values[0],
+ })
+
+ # write to csv
+ df_identity = pd.DataFrame.from_dict(identities)
+ df_identity.index.name = 'index'
+ df_identity.to_csv(fp_out)
+
+
diff --git a/megapixels/commands/datasets/lookup.py b/megapixels/commands/datasets/lookup.py
index 5a2a171e..c1c66c19 100644
--- a/megapixels/commands/datasets/lookup.py
+++ b/megapixels/commands/datasets/lookup.py
@@ -13,7 +13,7 @@ log = Logger.getLogger()
help='Vector index to lookup')
@click.option('--data_store', 'opt_data_store',
type=cfg.DataStoreVar,
- default=click_utils.get_default(types.DataStore.NAS),
+ default=click_utils.get_default(types.DataStore.SSD),
show_default=True,
help=click_utils.show_help(types.Dataset))
@click.option('--dataset', 'opt_dataset',
@@ -41,11 +41,12 @@ def cli(ctx, opt_index, opt_data_store, opt_dataset):
log = Logger.getLogger()
# init dataset
dataset = Dataset(opt_data_store, opt_dataset)
+ #dataset.load_face_vectors()
+ dataset.load_records()
+ dataset.load_identities()
# set data store and load files
- dataset.load()
# find image records
- image_record = dataset.roi_idx_to_record(opt_index)
- # debug
+ image_record = dataset.index_to_record(opt_index)
image_record.summarize()
# load image
im = cv.imread(image_record.filepath)
diff --git a/megapixels/commands/datasets/records.py b/megapixels/commands/datasets/records.py
new file mode 100644
index 00000000..80de5040
--- /dev/null
+++ b/megapixels/commands/datasets/records.py
@@ -0,0 +1,159 @@
+'''
+
+'''
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+identity_sources = ['subdir', 'subdir_head', 'subdir_tail']
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-t', '--threads', 'opt_threads', default=12,
+ help='Number of threads')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('--identity', 'opt_identity', default=None, type=click.Choice(identity_sources),
+ help='Identity source, blank for no identity')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+ help='Use glob recursion (slower)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media, opt_slice, opt_threads,
+ opt_identity, opt_force, opt_recursive):
+ """Generates sha256, uuid, and identity index CSV file"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+ from multiprocessing.dummy import Pool as ThreadPool
+ import random
+ import uuid
+
+ import pandas as pd
+ from tqdm import tqdm
+ from glob import glob
+
+ from app.models.data_store import DataStore
+ from app.utils import file_utils, im_utils
+
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ if not opt_force and Path(fp_out).exists():
+ log.error('File exists. Use "-f / --force" to overwite')
+ return
+
+ # ----------------------------------------------------------------
+ # glob files
+
+ fp_in = opt_fp_in if opt_fp_in is not None else data_store.media_images_original()
+ log.info(f'Globbing {fp_in}')
+ fp_ims = file_utils.glob_multi(fp_in, ['jpg', 'png'], recursive=opt_recursive)
+ # fail if none
+ if not fp_ims:
+ log.error('No images. Try with "--recursive"')
+ return
+ # slice to reduce
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ log.info('Found {:,} images'.format(len(fp_ims)))
+
+
+ # ----------------------------------------------------------------
+ # multithread process into SHA256
+
+ pbar = tqdm(total=len(fp_ims))
+
+ def as_sha256(fp_im):
+ pbar.update(1)
+ return file_utils.sha256(fp_im)
+
+ # convert to thread pool
+ sha256s = [] # ?
+ pool = ThreadPool(opt_threads)
+ with tqdm(total=len(fp_ims)) as pbar:
+ sha256s = pool.map(as_sha256, fp_ims)
+ pbar.close()
+
+
+ # ----------------------------------------------------------------
+ # convert data to dict
+
+ data = []
+ for sha256, fp_im in zip(sha256s, fp_ims):
+ fpp_im = Path(fp_im)
+ subdir = str(fpp_im.parent.relative_to(fp_in))
+
+ if opt_identity:
+ subdirs = subdir.split('/')
+ if not len(subdirs) > 0:
+ log.error(f'Could not split subdir: "{subdir}. Try different option for "--identity"')
+ log.error('exiting')
+ return
+ if opt_identity == 'subdir':
+ identity = subdirs[0] # use first/only part
+ elif opt_identity == 'subdir_head':
+ identity = subdirs[0] # use first part of subdir path
+ elif opt_identity == 'subdir_tail':
+ identity = subdirs[-1] # use last part of subdir path
+ else:
+ identity = ''
+
+ data.append({
+ 'subdir': subdir,
+ 'fn': fpp_im.stem,
+ 'ext': fpp_im.suffix.replace('.',''),
+ 'sha256': sha256,
+ 'uuid': uuid.uuid4(),
+ 'identity_key': identity
+ })
+
+ log.info(f'adding identity index using: "{opt_identity}". This may take a while...')
+ # convert dict to DataFrame
+ df_records = pd.DataFrame.from_dict(data)
+ # sort based on identity_key
+ df_records = df_records.sort_values(by=['identity_key'], ascending=True)
+ # add new column for identity
+ df_records['identity_index'] = [-1] * len(df_records)
+ # populate the identity_index
+ df_records_identity_groups = df_records.groupby('identity_key')
+ # enumerate groups to create identity indices
+ for identity_index, df_records_identity_group_tuple in enumerate(df_records_identity_groups):
+ identity_key, df_records_identity_group = df_records_identity_group_tuple
+ for ds_record in df_records_identity_group.itertuples():
+ df_records.at[ds_record.Index, 'identity_index'] = identity_index
+ # reset index after being sorted
+ df_records = df_records.reset_index(drop=True)
+ df_records.index.name = 'index' # reassign 'index' as primary key column
+ # write to CSV
+ file_utils.mkdirs(fp_out)
+ df_records.to_csv(fp_out)
+ # done
+ log.info(f'wrote rows: {len(df_records)} to {fp_out}') \ No newline at end of file
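
The identity_index assignment at the end of records.py is the step the log message warns may take a while. A condensed sketch of what it computes, using toy keys (in the command the key comes from the image subdirectory):

    import pandas as pd

    df_records = pd.DataFrame({'identity_key': ['bob', 'alice', 'alice', 'carol']})
    df_records = df_records.sort_values(by=['identity_key'], ascending=True)

    # enumerate the groups so every distinct identity_key gets a sequential integer id
    df_records['identity_index'] = -1
    for identity_index, (identity_key, group) in enumerate(df_records.groupby('identity_key')):
        for ds_record in group.itertuples():
            df_records.at[ds_record.Index, 'identity_index'] = identity_index

    df_records = df_records.reset_index(drop=True)
    df_records.index.name = 'index'
    print(df_records)  # alice -> 0, bob -> 1, carol -> 2

Once the frame is sorted by identity_key, pd.factorize(df_records['identity_key'])[0] should give the same codes in a single vectorized step, which may be worth considering for large datasets.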
diff --git a/megapixels/commands/datasets/s3.py b/megapixels/commands/datasets/s3.py
deleted file mode 100644
index 7769896b..00000000
--- a/megapixels/commands/datasets/s3.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-
-s3_dirs = {'media': cfg.S3_MEDIA_ROOT, 'metadata': cfg.S3_METADATA_ROOT}
-
-@click.command()
-@click.option('-i', '--input', 'opt_fps_in', required=True, multiple=True,
- help='Input directory')
-@click.option('--name', 'opt_dataset_name', required=True,
- help='Dataset key (eg "lfw"')
-@click.option('-a', '--action', 'opt_action', type=click.Choice(['sync', 'put']), default='sync',
- help='S3 action')
-@click.option('-t', '--type', 'opt_type', type=click.Choice(s3_dirs.keys()), required=True,
- help='S3 location')
-@click.option('--dry-run', 'opt_dryrun', is_flag=True, default=False)
-@click.pass_context
-def cli(ctx, opt_fps_in, opt_dataset_name, opt_action, opt_type, opt_dryrun):
- """Syncs files with S3/spaces server"""
-
- from os.path import join
- from pathlib import Path
-
- from tqdm import tqdm
- import pandas as pd
- import subprocess
-
- from app.utils import logger_utils, file_utils
-
- # -------------------------------------------------
- # init here
-
- log = logger_utils.Logger.getLogger()
- for opt_fp_in in opt_fps_in:
- dir_dst = join(s3_dirs[opt_type], opt_dataset_name, '')
- if Path(opt_fp_in).is_dir():
- fp_src = join(opt_fp_in, '') # add trailing slashes
- else:
- fp_src = join(opt_fp_in)
- cmd = ['s3cmd', opt_action, fp_src, dir_dst, '-P', '--follow-symlinks']
- log.info(' '.join(cmd))
- if not opt_dryrun:
- subprocess.call(cmd)
-
- \ No newline at end of file
diff --git a/megapixels/commands/datasets/s3_sync.py b/megapixels/commands/datasets/s3_sync.py
new file mode 100644
index 00000000..3098d9be
--- /dev/null
+++ b/megapixels/commands/datasets/s3_sync.py
@@ -0,0 +1,57 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+s3_dirs = {'media': cfg.S3_MEDIA_URL, 'metadata': cfg.S3_METADATA_URL}
+
+@click.command()
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-t', '--type', 'opt_type', type=click.Choice(s3_dirs.keys()), required=True,
+ help='S3 location')
+@click.option('--dry-run', 'opt_dryrun', is_flag=True, default=False)
+@click.pass_context
+def cli(ctx, opt_data_store, opt_dataset, opt_type, opt_dryrun):
+ """Syncs files with S3/spaces server"""
+
+ from os.path import join
+ from pathlib import Path
+
+ from tqdm import tqdm
+ import pandas as pd
+ import subprocess
+
+ from app.utils import logger_utils, file_utils
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ dataset_name = opt_dataset.name.lower()
+ if opt_type == 'media':
+ dir_src = join(data_store.uuid_dir(), '')
+ dir_dst = join(s3_dirs[opt_type], dataset_name, '')
+ elif opt_type == 'metadata':
+ dir_src = join(data_store.metadata_dir(), '')
+ dir_dst = join(s3_dirs[opt_type], dataset_name, '')
+
+ cmd = ['s3cmd', 'sync', dir_src, dir_dst, '-P', '--follow-symlinks']
+ log.info(' '.join(cmd))
+ if not opt_dryrun:
+ subprocess.call(cmd)
+
+ \ No newline at end of file
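
For reference, a sketch of the command line s3_sync.py assembles and runs, with placeholder paths and bucket URL (the real values come from DataStore and the S3_*_URL settings):

    import subprocess
    from os.path import join

    dir_src = join('/ssd/datasets/lfw/media/uuid', '')      # trailing slash matters to s3cmd sync
    dir_dst = join('s3://example-bucket/media', 'lfw', '')  # placeholder bucket URL

    cmd = ['s3cmd', 'sync', dir_src, dir_dst, '-P', '--follow-symlinks']
    print(' '.join(cmd))
    # subprocess.call(cmd)  # uncomment to actually run the sync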
diff --git a/megapixels/commands/datasets/symlink.py b/megapixels/commands/datasets/symlink.py
deleted file mode 100644
index 70ec6c46..00000000
--- a/megapixels/commands/datasets/symlink.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input records CSV')
-@click.option('-m', '--media', 'opt_fp_media', required=True,
- help='Input media directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output directory')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_media, opt_fp_out):
- """Symlinks images to new directory for S3"""
-
- import sys
- import os
- from os.path import join
- from pathlib import Path
-
- from tqdm import tqdm
- import pandas as pd
-
- from app.utils import logger_utils, file_utils
-
- # -------------------------------------------------
- # init here
-
- log = logger_utils.Logger.getLogger()
-
- df_records = pd.read_csv(opt_fp_in)
- nrows = len(df_records)
-
- file_utils.mkdirs(opt_fp_out)
-
- for record_id, row in tqdm(df_records.iterrows(), total=nrows):
- # make image path
- df = df_records.iloc[record_id]
- fpp_src = Path(join(opt_fp_media, df['subdir'], '{}.{}'.format(df['fn'], df['ext'])))
- fpp_dst = Path(join(opt_fp_out, '{}.{}'.format(df['uuid'], df['ext'])))
- fpp_dst.symlink_to(fpp_src)
-
- log.info('symlinked {:,} files'.format(nrows)) \ No newline at end of file
diff --git a/megapixels/commands/datasets/symlink_uuid.py b/megapixels/commands/datasets/symlink_uuid.py
new file mode 100644
index 00000000..7c5faa95
--- /dev/null
+++ b/megapixels/commands/datasets/symlink_uuid.py
@@ -0,0 +1,57 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset):
+ """Symlinks images to new directory for S3"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+
+ from tqdm import tqdm
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_records = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_records = pd.read_csv(fp_records).set_index('index')
+ nrows = len(df_records)
+
+ dir_out = data_store.uuid_dir() if opt_fp_out is None else opt_fp_out
+ file_utils.mkdirs(dir_out)
+
+ for ds_record in tqdm(df_records.itertuples(), total=nrows):
+ # make image path
+ fp_src = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ fp_dst = data_store.face_uuid(ds_record.uuid, ds_record.ext)
+ Path(fp_dst).symlink_to(Path(fp_src))
+
+ log.info('symlinked {:,} files'.format(nrows)) \ No newline at end of file
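
The per-record step in symlink_uuid.py reduces to Path.symlink_to. A sketch with literal, hypothetical paths in place of the DataStore helpers, plus a guard the command itself does not have, since symlink_to raises FileExistsError on a re-run:

    from pathlib import Path

    fp_src = Path('/data/lfw/media/original/Some_Person/img001.jpg')
    fp_dst = Path('/data/lfw/media/uuid/0f8fad5b-d9cb-469f-a165-70867728950e.jpg')

    fp_dst.parent.mkdir(parents=True, exist_ok=True)
    if not fp_dst.is_symlink():        # avoid FileExistsError when re-running
        fp_dst.symlink_to(fp_src)      # fp_dst becomes a link pointing at fp_src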
diff --git a/megapixels/commands/demo/face_search.py b/megapixels/commands/demo/face_search.py
index 08b2323d..0452cc9d 100644
--- a/megapixels/commands/demo/face_search.py
+++ b/megapixels/commands/demo/face_search.py
@@ -45,10 +45,9 @@ def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_gpu):
log = Logger.getLogger()
# init face detection
+ detector = face_detector.DetectorDLIBHOG()
# init face recognition
- detector = face_detector.DetectorDLIBHOG()
- # face recognition/vector
recognition = face_recognition.RecognitionDLIB(gpu=opt_gpu)
# load query image