From 88ec48e1c4d93ba9cd3aa186c068ef2aa4c27c56 Mon Sep 17 00:00:00 2001
From: adamhrv
Date: Mon, 17 Dec 2018 01:37:31 +0100
Subject: fixing dataset processors

---
 megapixels/commands/datasets/records.py | 159 ++++++++++++++++++++++++++++++++
 1 file changed, 159 insertions(+)
 create mode 100644 megapixels/commands/datasets/records.py

diff --git a/megapixels/commands/datasets/records.py b/megapixels/commands/datasets/records.py
new file mode 100644
index 00000000..80de5040
--- /dev/null
+++ b/megapixels/commands/datasets/records.py
@@ -0,0 +1,159 @@
+'''
+Generates the file records CSV (sha256, uuid, identity key) for a dataset
+'''
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+identity_sources = ['subdir', 'subdir_head', 'subdir_tail']
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+  help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+  help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+  help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+  type=cfg.DataStoreVar,
+  default=click_utils.get_default(types.DataStore.SSD),
+  show_default=True,
+  help=click_utils.show_help(types.DataStore))
+@click.option('--dataset', 'opt_dataset',
+  type=cfg.DatasetVar,
+  required=True,
+  show_default=True,
+  help=click_utils.show_help(types.Dataset))
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+  help='Slice list of files')
+@click.option('-t', '--threads', 'opt_threads', default=12,
+  help='Number of threads')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+  help='Force overwrite file')
+@click.option('--identity', 'opt_identity', default=None, type=click.Choice(identity_sources),
+  help='Identity source, blank for no identity')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+  help='Use glob recursion (slower)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media, opt_slice, opt_threads,
+  opt_identity, opt_force, opt_recursive):
+  """Generates sha256, uuid, and identity index CSV file"""
+
+  import sys
+  from glob import glob
+  from os.path import join
+  from pathlib import Path
+  import time
+  from multiprocessing.dummy import Pool as ThreadPool
+  import random
+  import uuid
+
+  import pandas as pd
+  from tqdm import tqdm
+
+  from app.models.data_store import DataStore
+  from app.utils import file_utils, im_utils
+
+
+  # set data_store
+  data_store = DataStore(opt_data_store, opt_dataset)
+  # get filepath out
+  fp_out = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_out is None else opt_fp_out
+  # exit if exists
+  if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+    return
+
+  # ----------------------------------------------------------------
+  # glob files
+
+  fp_in = opt_fp_in if opt_fp_in is not None else data_store.media_images_original()
+  log.info(f'Globbing {fp_in}')
+  fp_ims = file_utils.glob_multi(fp_in, ['jpg', 'png'], recursive=opt_recursive)
+  # fail if none
+  if not fp_ims:
+    log.error('No images. Try with "--recursive"')
+    return
+  # slice to reduce (the default (None, None) slice keeps the full list)
+  if opt_slice:
+    fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+  log.info('Found {:,} images'.format(len(fp_ims)))
+
+
+  # ----------------------------------------------------------------
+  # multithread process into SHA256
+
+  def as_sha256(fp_im):
+    pbar.update(1)
+    return file_utils.sha256(fp_im)
+
+  # hash files in a thread pool, sharing a single progress bar
+  pool = ThreadPool(opt_threads)
+  with tqdm(total=len(fp_ims)) as pbar:
+    sha256s = pool.map(as_sha256, fp_ims)
+
+
+  # ----------------------------------------------------------------
+  # convert data to dict
+
+  data = []
+  for sha256, fp_im in zip(sha256s, fp_ims):
+    fpp_im = Path(fp_im)
+    subdir = str(fpp_im.parent.relative_to(fp_in))
+
+    if opt_identity:
+      subdirs = subdir.split('/')
+      if not subdirs:
+        log.error(f'Could not split subdir: "{subdir}". Try a different option for "--identity"')
+        log.error('exiting')
+        return
+      if opt_identity == 'subdir':
+        identity = subdirs[0]  # use first/only part
+      elif opt_identity == 'subdir_head':
+        identity = subdirs[0]  # use first part of subdir path
+      elif opt_identity == 'subdir_tail':
+        identity = subdirs[-1]  # use last part of subdir path
+    else:
+      identity = ''
+
+    data.append({
+      'subdir': subdir,
+      'fn': fpp_im.stem,
+      'ext': fpp_im.suffix.replace('.', ''),
+      'sha256': sha256,
+      'uuid': uuid.uuid4(),
+      'identity_key': identity
+    })
+
+  log.info(f'adding identity index using: "{opt_identity}". This may take a while...')
+  # convert dict to DataFrame
+  df_records = pd.DataFrame.from_dict(data)
+  # sort based on identity_key
+  df_records = df_records.sort_values(by=['identity_key'], ascending=True)
+  # add new column for identity
+  df_records['identity_index'] = [-1] * len(df_records)
+  # populate the identity_index
+  df_records_identity_groups = df_records.groupby('identity_key')
+  # enumerate groups to create identity indices
+  for identity_index, df_records_identity_group_tuple in enumerate(df_records_identity_groups):
+    identity_key, df_records_identity_group = df_records_identity_group_tuple
+    for ds_record in df_records_identity_group.itertuples():
+      df_records.at[ds_record.Index, 'identity_index'] = identity_index
+  # reset index after being sorted
+  df_records = df_records.reset_index(drop=True)
+  df_records.index.name = 'index'  # reassign 'index' as primary key column
+  # write to CSV
+  file_utils.mkdirs(fp_out)
+  df_records.to_csv(fp_out)
+  # done
+  log.info(f'wrote rows: {len(df_records)} to {fp_out}')
\ No newline at end of file
-- 
cgit v1.2.3-70-g09d2
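The identity-index pass in this commit sorts by identity_key, enumerates the pandas groupby groups, and writes each group's ordinal back row by row. pandas can compute the same ordinal directly with GroupBy.ngroup(). A minimal self-contained sketch (the sample rows are hypothetical, not taken from any dataset):

import pandas as pd

# hypothetical records, mirroring the columns built in the patch
df = pd.DataFrame([
  {'fn': 'Aaron_Peirsol_0001', 'identity_key': 'Aaron_Peirsol'},
  {'fn': 'Aaron_Peirsol_0002', 'identity_key': 'Aaron_Peirsol'},
  {'fn': 'Zach_Braff_0001',    'identity_key': 'Zach_Braff'},
])

df = df.sort_values(by=['identity_key'], ascending=True)

# ngroup() numbers each identity_key group 0, 1, 2, ... in sorted key order,
# the same result as the explicit enumerate-over-groupby loop above
df['identity_index'] = df.groupby('identity_key').ngroup()

df = df.reset_index(drop=True)
df.index.name = 'index'
print(df)

Because groupby sorts keys by default, ngroup() agrees with the sort_values() + enumerate() combination in the patch while avoiding the per-row writeback.
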
Try with "--recursive"') + return + # slice to reduce + if opt_slice: + fp_ims = fp_ims[opt_slice[0]:opt_slice[1]] + log.info('Found {:,} images'.format(len(fp_ims))) + + + # ---------------------------------------------------------------- + # multithread process into SHA256 + + pbar = tqdm(total=len(fp_ims)) + + def as_sha256(fp_im): + pbar.update(1) + return file_utils.sha256(fp_im) + + # convert to thread pool + sha256s = [] # ? + pool = ThreadPool(opt_threads) + with tqdm(total=len(fp_ims)) as pbar: + sha256s = pool.map(as_sha256, fp_ims) + pbar.close() + + + # ---------------------------------------------------------------- + # convert data to dict + + data = [] + for sha256, fp_im in zip(sha256s, fp_ims): + fpp_im = Path(fp_im) + subdir = str(fpp_im.parent.relative_to(fp_in)) + + if opt_identity: + subdirs = subdir.split('/') + if not len(subdirs) > 0: + log.error(f'Could not split subdir: "{subdir}. Try different option for "--identity"') + log.error('exiting') + return + if opt_identity == 'subdir': + identity = subdirs[0] # use first/only part + elif opt_identity == 'subdir_head': + identity = subdirs[0] # use first part of subdir path + elif opt_identity == 'subdir_tail': + identity = subdirs[-1] # use last part of subdir path + else: + identity = '' + + data.append({ + 'subdir': subdir, + 'fn': fpp_im.stem, + 'ext': fpp_im.suffix.replace('.',''), + 'sha256': sha256, + 'uuid': uuid.uuid4(), + 'identity_key': identity + }) + + log.info(f'adding identity index using: "{opt_identity}". This may take a while...') + # convert dict to DataFrame + df_records = pd.DataFrame.from_dict(data) + # sort based on identity_key + df_records = df_records.sort_values(by=['identity_key'], ascending=True) + # add new column for identity + df_records['identity_index'] = [-1] * len(df_records) + # populate the identity_index + df_records_identity_groups = df_records.groupby('identity_key') + # enumerate groups to create identity indices + for identity_index, df_records_identity_group_tuple in enumerate(df_records_identity_groups): + identity_key, df_records_identity_group = df_records_identity_group_tuple + for ds_record in df_records_identity_group.itertuples(): + df_records.at[ds_record.Index, 'identity_index'] = identity_index + # reset index after being sorted + df_records = df_records.reset_index(drop=True) + df_records.index.name = 'index' # reassign 'index' as primary key column + # write to CSV + file_utils.mkdirs(fp_out) + df_records.to_csv(fp_out) + # done + log.info(f'wrote rows: {len(df_records)} to {fp_out}') \ No newline at end of file -- cgit v1.2.3-70-g09d2 From 5340bee951c18910fd764241945f1f136b5a22b4 Mon Sep 17 00:00:00 2001 From: adamhrv Date: Sun, 23 Dec 2018 01:24:24 +0100 Subject: fixing face roi --- megapixels/app/settings/types.py | 2 +- megapixels/commands/cv/face_roi.py | 4 ++-- megapixels/commands/cv/face_vector.py | 2 +- megapixels/commands/datasets/records.py | 40 ++++++++++++++++++++------------- megapixels/commands/demo/face_search.py | 4 +++- 5 files changed, 31 insertions(+), 21 deletions(-) (limited to 'megapixels/commands/datasets/records.py') diff --git a/megapixels/app/settings/types.py b/megapixels/app/settings/types.py index ee6f8de5..0805c5bd 100644 --- a/megapixels/app/settings/types.py +++ b/megapixels/app/settings/types.py @@ -49,7 +49,7 @@ class Metadata(Enum): FACE_LANDMARKS_3D = range(7) class Dataset(Enum): - LFW, VGG_FACE2 = range(2) + LFW, VGG_FACE2, MSCELEB, UCCS, UMD_FACES = range(5) # 
diff --git a/megapixels/commands/cv/face_roi.py b/megapixels/commands/cv/face_roi.py
index d7248aee..a08566a8 100644
--- a/megapixels/commands/cv/face_roi.py
+++ b/megapixels/commands/cv/face_roi.py
@@ -115,7 +115,7 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
 
   data = []
   for df_record in tqdm(df_records.itertuples(), total=len(df_records)):
-    fp_im = data_store.face_image(str(df_record.subdir), str(df_record.fn), str(df_record.ext))
+    fp_im = data_store.face(str(df_record.subdir), str(df_record.fn), str(df_record.ext))
     im = cv.imread(fp_im)
 
     # filter out color or grayscale images
@@ -149,10 +149,10 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
 
     # debug display
     if opt_display and len(bboxes):
-      bbox_dim = bbox.to_dim(im.shape[:2][::-1])  # w,h
       im_md = im_utils.resize(im, width=min(1200, opt_size[0]))
       for bbox in bboxes:
         bbox_dim = bbox.to_dim(im_md.shape[:2][::-1])
+        log.debug(f'bbox: {bbox_dim}')
         cv.rectangle(im_md, bbox_dim.pt_tl, bbox_dim.pt_br, (0,255,0), 3)
       cv.imshow('', im_md)
       while True:

diff --git a/megapixels/commands/cv/face_vector.py b/megapixels/commands/cv/face_vector.py
index cd816f9f..7200d73b 100644
--- a/megapixels/commands/cv/face_vector.py
+++ b/megapixels/commands/cv/face_vector.py
@@ -115,7 +115,7 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
       # padding=opt_padding not yet implemented in 19.16 but merged in master
       vec = facerec.vec(im, bbox_dim, jitters=opt_jitters)
       vec_str = ','.join([repr(x) for x in vec])  # convert to string for CSV
-      vecs.append( {'roi_index': roi_index, 'image_index': image_index, 'vec': vec_str})
+      vecs.append( {'roi_index': roi_index, 'record_index': image_index, 'vec': vec_str})
 
       # save data
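The face_vector change renames the foreign key to record_index so each vector row points back at the records CSV. The vectors themselves travel as comma-joined strings; a minimal sketch of that round trip (numpy stand-ins for the project's facerec wrapper; the 128-d size is an assumption based on dlib's usual descriptor length):

import numpy as np

# stand-in for vec = facerec.vec(im, bbox_dim, jitters=opt_jitters)
vec = np.random.rand(128)

# serialize for the CSV, as in the patch
vec_str = ','.join([repr(x) for x in vec])

# deserialize when the CSV is read back for matching
vec_restored = np.array([float(x) for x in vec_str.split(',')])
assert np.allclose(vec, vec_restored)
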
diff --git a/megapixels/commands/datasets/records.py b/megapixels/commands/datasets/records.py
index 80de5040..b6ef618b 100644
--- a/megapixels/commands/datasets/records.py
+++ b/megapixels/commands/datasets/records.py
@@ -107,10 +107,12 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
   # convert data to dict
 
   data = []
+  identity_count = 0
   for sha256, fp_im in zip(sha256s, fp_ims):
     fpp_im = Path(fp_im)
     subdir = str(fpp_im.parent.relative_to(fp_in))
 
+
     if opt_identity:
       subdirs = subdir.split('/')
       if not subdirs:
@@ -124,7 +126,8 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
       elif opt_identity == 'subdir_tail':
         identity = subdirs[-1]  # use last part of subdir path
     else:
-      identity = ''
+      identity = identity_count  # use an incrementing number
+      identity_count += 1
 
     data.append({
       'subdir': subdir,
@@ -135,22 +138,27 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media,
       'identity_key': identity
     })
 
-  log.info(f'adding identity index using: "{opt_identity}". This may take a while...')
-  # convert dict to DataFrame
   df_records = pd.DataFrame.from_dict(data)
-  # sort based on identity_key
-  df_records = df_records.sort_values(by=['identity_key'], ascending=True)
-  # add new column for identity
-  df_records['identity_index'] = [-1] * len(df_records)
-  # populate the identity_index
-  df_records_identity_groups = df_records.groupby('identity_key')
-  # enumerate groups to create identity indices
-  for identity_index, df_records_identity_group_tuple in enumerate(df_records_identity_groups):
-    identity_key, df_records_identity_group = df_records_identity_group_tuple
-    for ds_record in df_records_identity_group.itertuples():
-      df_records.at[ds_record.Index, 'identity_index'] = identity_index
-  # reset index after being sorted
-  df_records = df_records.reset_index(drop=True)
+  if opt_identity:
+    log.info(f'adding identity index using: "{opt_identity}". This may take a while...')
+    # sort based on identity_key
+    df_records = df_records.sort_values(by=['identity_key'], ascending=True)
+    # add new column for identity
+    df_records['identity_index'] = [-1] * len(df_records)
+    # populate the identity_index
+    df_records_identity_groups = df_records.groupby('identity_key')
+    # enumerate groups to create identity indices
+    for identity_index, df_records_identity_group_tuple in enumerate(df_records_identity_groups):
+      identity_key, df_records_identity_group = df_records_identity_group_tuple
+      for ds_record in df_records_identity_group.itertuples():
+        df_records.at[ds_record.Index, 'identity_index'] = identity_index
+    # reset index after being sorted
+    df_records = df_records.reset_index(drop=True)
+  else:
+    # no identity source; identity_key already holds an incrementing number per image
+    pass
 
   df_records.index.name = 'index'  # reassign 'index' as primary key column
   # write to CSV
   file_utils.mkdirs(fp_out)

diff --git a/megapixels/commands/demo/face_search.py b/megapixels/commands/demo/face_search.py
index 34a25762..6e4bcdad 100644
--- a/megapixels/commands/demo/face_search.py
+++ b/megapixels/commands/demo/face_search.py
@@ -21,6 +21,8 @@ log = Logger.getLogger()
   required=True,
   show_default=True,
   help=click_utils.show_help(types.Dataset))
+@click.option('--results', 'opt_results', default=5,
+  help='Number of match results to display')
 @click.option('--gpu', 'opt_gpu', default=0,
   help='GPU index (use -1 for CPU)')
 @click.pass_context
@@ -73,7 +75,7 @@ def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_gpu):
     vec_query = recognition.vec(im_query, bbox)
 
     # find matches
-    image_records = dataset.find_matches(vec_query, n_results=5)
+    image_records = dataset.find_matches(vec_query, n_results=opt_results)
 
     # summary
     ims_match = [im_query]
-- 
cgit v1.2.3-70-g09d2
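The face_search demo ranks stored face vectors by distance to the query via dataset.find_matches(); that implementation is not part of this patch series. The following is only an illustrative sketch of such a lookup, assuming 128-d vectors and Euclidean distance (dlib's convention, where roughly 0.6 is a common same-person threshold):

import numpy as np

def find_matches(vec_query, vecs, n_results=5):
  # Euclidean distance from the query to every stored vector (one row per face)
  dists = np.linalg.norm(vecs - vec_query, axis=1)
  # indices of the n_results nearest rows, best match first
  order = np.argsort(dists)[:n_results]
  return [(int(i), float(dists[i])) for i in order]

# usage with random stand-in data
vecs = np.random.rand(1000, 128)  # stored vectors, keyed by record_index
vec_query = np.random.rand(128)
for record_index, dist in find_matches(vec_query, vecs, n_results=5):
  print(record_index, round(dist, 3))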