-rw-r--r--  megapixels/app/settings/types.py          |  2
-rw-r--r--  megapixels/commands/cv/face_roi.py        |  4
-rw-r--r--  megapixels/commands/cv/face_vector.py     |  2
-rw-r--r--  megapixels/commands/datasets/records.py   | 40
-rw-r--r--  megapixels/commands/demo/face_search.py   |  4
5 files changed, 31 insertions, 21 deletions
diff --git a/megapixels/app/settings/types.py b/megapixels/app/settings/types.py
index ee6f8de5..0805c5bd 100644
--- a/megapixels/app/settings/types.py
+++ b/megapixels/app/settings/types.py
@@ -49,7 +49,7 @@ class Metadata(Enum):
FACE_LANDMARKS_3D = range(7)
class Dataset(Enum):
- LFW, VGG_FACE2 = range(2)
+ LFW, VGG_FACE2, MSCELEB, UCCS, UMD_FACES = range(5)
# ---------------------------------------------------------------------
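A quick sketch of what the extended enum yields: tuple unpacking over range() inside the Enum body assigns each member a consecutive integer value, so the new datasets slot in after the existing ones without renumbering them (standalone, minimal example using the names from the diff):

    from enum import Enum

    class Dataset(Enum):
        # tuple unpacking over range(5) assigns values 0..4 in order
        LFW, VGG_FACE2, MSCELEB, UCCS, UMD_FACES = range(5)

    assert Dataset.LFW.value == 0
    assert Dataset.UMD_FACES.value == 4
    print(Dataset['MSCELEB'])  # lookup by name, e.g. from a CLI argument
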
diff --git a/megapixels/commands/cv/face_roi.py b/megapixels/commands/cv/face_roi.py
index d7248aee..a08566a8 100644
--- a/megapixels/commands/cv/face_roi.py
+++ b/megapixels/commands/cv/face_roi.py
@@ -115,7 +115,7 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
data = []
for df_record in tqdm(df_records.itertuples(), total=len(df_records)):
- fp_im = data_store.face_image(str(df_record.subdir), str(df_record.fn), str(df_record.ext))
+ fp_im = data_store.face(str(df_record.subdir), str(df_record.fn), str(df_record.ext))
im = cv.imread(fp_im)
# filter out color or grayscale images
@@ -149,10 +149,10 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
# debug display
if opt_display and len(bboxes):
- bbox_dim = bbox.to_dim(im.shape[:2][::-1]) # w,h
im_md = im_utils.resize(im, width=min(1200, opt_size[0]))
for bbox in bboxes:
bbox_dim = bbox.to_dim(im_md.shape[:2][::-1])
+ log.debug(f'bbox: {bbox_dim}')
cv.rectangle(im_md, bbox_dim.pt_tl, bbox_dim.pt_br, (0,255,0), 3)
cv.imshow('', im_md)
while True:
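The fix above recomputes bbox_dim for each box inside the loop, scaled to the resized debug image (im_md) rather than once against the full-resolution frame. The project's BBox.to_dim presumably maps a normalized box into pixel space given a (width, height) tuple; a hypothetical stand-in illustrating that scaling:

    class BBox:
        """Hypothetical stand-in: normalized 0..1 box with a to_dim scaler."""
        def __init__(self, x1, y1, x2, y2):
            self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2

        def to_dim(self, wh):
            w, h = wh  # im.shape[:2][::-1] yields (width, height)
            return BBox(int(self.x1 * w), int(self.y1 * h),
                        int(self.x2 * w), int(self.y2 * h))

    bbox = BBox(0.25, 0.25, 0.75, 0.75)
    print(vars(bbox.to_dim((1200, 800))))  # {'x1': 300, 'y1': 200, 'x2': 900, 'y2': 600}
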
diff --git a/megapixels/commands/cv/face_vector.py b/megapixels/commands/cv/face_vector.py
index cd816f9f..7200d73b 100644
--- a/megapixels/commands/cv/face_vector.py
+++ b/megapixels/commands/cv/face_vector.py
@@ -115,7 +115,7 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
# padding=opt_padding not yet implemented in 19.16 but merged in master
vec = facerec.vec(im, bbox_dim, jitters=opt_jitters)
vec_str = ','.join([repr(x) for x in vec]) # convert to string for CSV
- vecs.append( {'roi_index': roi_index, 'image_index': image_index, 'vec': vec_str})
+ vecs.append( {'roi_index': roi_index, 'record_index': image_index, 'vec': vec_str})
# save data
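Renaming the key to record_index ties each face vector back to a row of the records CSV produced by records.py below, whose primary key column is named 'index'. A hedged sketch of the join this presumably enables; the file names and surrounding columns here are assumptions:

    import pandas as pd

    # assumed filenames; 'index' is the records primary key per records.py
    df_records = pd.read_csv('records.csv', index_col='index')
    df_vecs = pd.read_csv('vectors.csv')

    # attach filename/identity metadata to every face vector
    df_joined = df_vecs.join(df_records, on='record_index')
    print(df_joined.columns.tolist())
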
diff --git a/megapixels/commands/datasets/records.py b/megapixels/commands/datasets/records.py
index 80de5040..b6ef618b 100644
--- a/megapixels/commands/datasets/records.py
+++ b/megapixels/commands/datasets/records.py
@@ -107,10 +107,12 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
# convert data to dict
data = []
+ identity_count = 0
for sha256, fp_im in zip(sha256s, fp_ims):
fpp_im = Path(fp_im)
subdir = str(fpp_im.parent.relative_to(fp_in))
+
if opt_identity:
subdirs = subdir.split('/')
if not len(subdirs) > 0:
@@ -124,7 +126,8 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
elif opt_identity == 'subdir_tail':
identity = subdirs[-1] # use last part of subdir path
else:
- identity = ''
+ identity = identity_count # use incrementing number
+ identity_count += 1
data.append({
'subdir': subdir,
@@ -135,22 +138,27 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
'identity_key': identity
})
- log.info(f'adding identity index using: "{opt_identity}". This may take a while...')
- # convert dict to DataFrame
df_records = pd.DataFrame.from_dict(data)
- # sort based on identity_key
- df_records = df_records.sort_values(by=['identity_key'], ascending=True)
- # add new column for identity
- df_records['identity_index'] = [-1] * len(df_records)
- # populate the identity_index
- df_records_identity_groups = df_records.groupby('identity_key')
- # enumerate groups to create identity indices
- for identity_index, df_records_identity_group_tuple in enumerate(df_records_identity_groups):
- identity_key, df_records_identity_group = df_records_identity_group_tuple
- for ds_record in df_records_identity_group.itertuples():
- df_records.at[ds_record.Index, 'identity_index'] = identity_index
- # reset index after being sorted
- df_records = df_records.reset_index(drop=True)
+ if opt_identity:
+ log.info(f'adding identity index using: "{opt_identity}". This may take a while...')
+ # convert dict to DataFrame
+ # sort based on identity_key
+ df_records = df_records.sort_values(by=['identity_key'], ascending=True)
+ # add new column for identity
+ df_records['identity_index'] = [-1] * len(df_records)
+ # populate the identity_index
+ df_records_identity_groups = df_records.groupby('identity_key')
+ # enumerate groups to create identity indices
+ for identity_index, df_records_identity_group_tuple in enumerate(df_records_identity_groups):
+ identity_key, df_records_identity_group = df_records_identity_group_tuple
+ for ds_record in df_records_identity_group.itertuples():
+ df_records.at[ds_record.Index, 'identity_index'] = identity_index
+ # reset index after being sorted
+ df_records = df_records.reset_index(drop=True)
+ else:
+ # identity_key was already set to an incrementing number above (person 1, 2, 3...)
+ pass
+
df_records.index.name = 'index' # reassign 'index' as primary key column
# write to CSV
file_utils.mkdirs(fp_out)
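The enumerate-over-groups loop above gives every identity_key a dense integer id in sorted-key order. For reference only (not what the patch uses), pandas can express the same mapping with groupby().ngroup(), which clarifies what the loop computes:

    import pandas as pd

    df = pd.DataFrame({'identity_key': ['bob', 'alice', 'bob', 'carol']})
    df = df.sort_values(by=['identity_key'], ascending=True)
    # dense 0..N-1 ids, one per distinct identity_key, in sorted key order
    df['identity_index'] = df.groupby('identity_key').ngroup()
    print(df['identity_index'].tolist())  # [0, 1, 1, 2]
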
diff --git a/megapixels/commands/demo/face_search.py b/megapixels/commands/demo/face_search.py
index 34a25762..6e4bcdad 100644
--- a/megapixels/commands/demo/face_search.py
+++ b/megapixels/commands/demo/face_search.py
@@ -21,6 +21,8 @@ log = Logger.getLogger()
required=True,
show_default=True,
help=click_utils.show_help(types.Dataset))
+@click.option('--results', 'opt_results', default=5,
+ help='Number of match results to display')
@click.option('--gpu', 'opt_gpu', default=0,
help='GPU index (use -1 for CPU)')
@click.pass_context
@@ -73,7 +75,7 @@ def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_gpu):
vec_query = recognition.vec(im_query, bbox)
# find matches
- image_records = dataset.find_matches(vec_query, n_results=5)
+ image_records = dataset.find_matches(vec_query, n_results=opt_results)
# summary
ims_match = [im_query]
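For the new --results option to reach the command body, the cli callback presumably also gains an opt_results parameter (that signature change falls outside the hunks shown). A minimal, hypothetical sketch of the click pattern involved:

    import click

    @click.command()
    @click.option('--results', 'opt_results', default=5,
                  help='Number of match results to display')
    def cli(opt_results):
        # click passes the value as the keyword 'opt_results',
        # so the parameter must appear in the function signature
        click.echo(f'returning top {opt_results} matches')

    if __name__ == '__main__':
        cli()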