-rw-r--r-- megapixels/app/models/data_store.py 20
-rw-r--r-- megapixels/app/models/dataset.py 125
-rw-r--r-- megapixels/app/settings/app_cfg.py 2
-rw-r--r-- megapixels/app/settings/types.py 2
-rw-r--r-- megapixels/app/utils/file_utils.py 12
-rw-r--r-- megapixels/commands/cv/cluster.py 22
-rw-r--r-- megapixels/commands/cv/face_pose.py (renamed from megapixels/commands/cv/gen_pose.py) 17
-rw-r--r-- megapixels/commands/cv/face_roi.py (renamed from megapixels/commands/cv/gen_rois.py) 17
-rw-r--r-- megapixels/commands/cv/face_vector.py (renamed from megapixels/commands/cv/gen_face_vec.py) 18
-rw-r--r-- megapixels/commands/datasets/filter_by_pose.py 41
-rw-r--r-- megapixels/commands/datasets/gen_filepath.py 4
-rw-r--r-- megapixels/commands/datasets/gen_sha256.py 152
-rw-r--r-- megapixels/commands/datasets/gen_uuid.py 2
-rw-r--r-- megapixels/commands/datasets/identity_meta_lfw.py 93
-rw-r--r-- megapixels/commands/datasets/identity_meta_vgg_face2.py 88
-rw-r--r-- megapixels/commands/datasets/lookup.py 9
-rw-r--r-- megapixels/commands/datasets/records.py 159
-rw-r--r-- megapixels/commands/datasets/s3.py 47
-rw-r--r-- megapixels/commands/datasets/s3_sync.py 57
-rw-r--r-- megapixels/commands/datasets/symlink.py 45
-rw-r--r-- megapixels/commands/datasets/symlink_uuid.py 57
-rw-r--r-- megapixels/commands/demo/face_search.py 3
-rw-r--r-- megapixels/notebooks/_local_scratch.ipynb 196
-rw-r--r-- megapixels/notebooks/datasets/lfw/lfw_make_identity_csv.ipynb 510
-rw-r--r-- megapixels/notebooks/examples/face_recognition_demo.ipynb 4
25 files changed, 1242 insertions, 460 deletions
diff --git a/megapixels/app/models/data_store.py b/megapixels/app/models/data_store.py
index 8ec1f8ba..244aba60 100644
--- a/megapixels/app/models/data_store.py
+++ b/megapixels/app/models/data_store.py
@@ -21,15 +21,27 @@ class DataStore:
def metadata(self, enum_type):
return join(self.dir_metadata, f'{enum_type.name.lower()}.csv')
+ def metadata_dir(self):
+ return join(self.dir_metadata)
+
def media_images_original(self):
return join(self.dir_media, 'original')
- def face_image(self, subdir, fn, ext):
+ def face(self, subdir, fn, ext):
return join(self.dir_media, 'original', subdir, f'{fn}.{ext}')
- def face_image_crop(self, subdir, fn, ext):
+ def face_crop(self, subdir, fn, ext):
return join(self.dir_media, 'cropped', subdir, f'{fn}.{ext}')
+ def face_uuid(self, uuid, ext):
+ return join(self.dir_media, 'uuid',f'{uuid}.{ext}')
+
+ def face_crop_uuid(self, uuid, ext):
+ return join(self.dir_media, 'uuid', f'{uuid}.{ext}')
+
+ def uuid_dir(self):
+ return join(self.dir_media, 'uuid')
+
class DataStoreS3:
# S3 server
@@ -40,11 +52,11 @@ class DataStoreS3:
def metadata(self, opt_metadata_type, ext='csv'):
return join(self._dir_metadata, f'{opt_metadata_type.name.lower()}.{ext}')
- def face_image(self, opt_uuid, ext='jpg'):
+ def face(self, opt_uuid, ext='jpg'):
#return join(self._dir_media, 'original', f'{opt_uuid}.{ext}')
return join(self._dir_media, f'{opt_uuid}.{ext}')
- def face_image_crop(self, opt_uuid, ext='jpg'):
+ def face_crop(self, opt_uuid, ext='jpg'):
# not currently using?
return join(self._dir_media, 'cropped', f'{opt_uuid}.{ext}')
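A minimal usage sketch of the renamed DataStore helpers (enum values and paths illustrative, not from this commit): face() resolves the original subdir layout, while face_uuid() resolves the flattened uuid/ directory that gets symlinked and synced to S3.

    # illustrative sketch only
    from app.models.data_store import DataStore
    from app.settings import types

    ds = DataStore(types.DataStore.SSD, types.Dataset.LFW)
    ds.metadata(types.Metadata.FILE_RECORD)                      # .../metadata/file_record.csv
    ds.face('Vladimir_Putin', 'Vladimir_Putin_0029', 'jpg')      # .../media/original/<subdir>/<fn>.<ext>
    ds.face_uuid('6f1d3c2a-9c1e-4f7a-8b1d-2a7c3e9d4b5f', 'jpg')  # .../media/uuid/<uuid>.jpg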
diff --git a/megapixels/app/models/dataset.py b/megapixels/app/models/dataset.py
index 8fef8a7e..35e10465 100644
--- a/megapixels/app/models/dataset.py
+++ b/megapixels/app/models/dataset.py
@@ -23,7 +23,7 @@ from app.utils.logger_utils import Logger
class Dataset:
- def __init__(self, opt_data_store, opt_dataset_type, load_files=True):
+ def __init__(self, opt_data_store, opt_dataset_type):
self._dataset_type = opt_dataset_type # enum type
self.log = Logger.getLogger()
self._metadata = {}
@@ -31,31 +31,62 @@ class Dataset:
self._nullframe = pd.DataFrame() # empty placeholder
self.data_store = DataStore(opt_data_store, self._dataset_type)
self.data_store_s3 = DataStoreS3(self._dataset_type)
- self.load_metadata()
- def load_metadata(self):
- '''Loads all CSV files into (dict) of DataFrames'''
- self.log.info(f'creating dataset: {self._dataset_type}...')
- for metadata_type in types.Metadata:
- self.log.info(f'load metadata: {metadata_type}')
- fp_csv = self.data_store.metadata(metadata_type)
- self.log.info(f'loading: {fp_csv}')
- if Path(fp_csv).is_file():
- self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
- if metadata_type == types.Metadata.FACE_VECTOR:
- # convert DataFrame to list of floats
- self._face_vectors = self.df_to_vec_list(self._metadata[metadata_type])
- self.log.info(f'build face vector dict: {len(self._face_vectors)}')
- self._metadata[metadata_type].drop('vec', axis=1, inplace=True)
- else:
- self.log.error(f'File not found: {fp_csv}. Exiting.')
- sys.exit()
- self.log.info('finished loading')
+ def load_face_vectors(self):
+ metadata_type = types.Metadata.FACE_VECTOR
+ fp_csv = self.data_store.metadata(metadata_type)
+ self.log.info(f'loading: {fp_csv}')
+ if Path(fp_csv).is_file():
+ self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
+ # convert DataFrame to list of floats
+ self._face_vectors = self.df_vecs_to_dict(self._metadata[metadata_type])
+ self._face_vector_idxs = self.df_vec_idxs_to_dict(self._metadata[metadata_type])
+ self.log.info(f'build face vector dict: {len(self._face_vectors)}')
+ # remove the face vector column, it can be several GB of memory
+ self._metadata[metadata_type].drop('vec', axis=1, inplace=True)
+ else:
+ self.log.error(f'File not found: {fp_csv}. Exiting.')
+ sys.exit()
+
+ def load_records(self):
+ metadata_type = types.Metadata.FILE_RECORD
+ fp_csv = self.data_store.metadata(metadata_type)
+ self.log.info(f'loading: {fp_csv}')
+ if Path(fp_csv).is_file():
+ self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
+ else:
+ self.log.error(f'File not found: {fp_csv}. Exiting.')
+ sys.exit()
+
+ def load_identities(self):
+ metadata_type = types.Metadata.IDENTITY
+ fp_csv = self.data_store.metadata(metadata_type)
+ self.log.info(f'loading: {fp_csv}')
+ if Path(fp_csv).is_file():
+ self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
+ else:
+ self.log.error(f'File not found: {fp_csv}. Exiting.')
+ sys.exit()
def metadata(self, opt_metadata_type):
- return self._metadata.get(opt_metadata_type, self._nullframe)
+ return self._metadata.get(opt_metadata_type, None)
- def roi_idx_to_record(self, vector_index):
+ def index_to_record(self, index):
+ # get record meta
+ df_record = self._metadata[types.Metadata.FILE_RECORD]
+ ds_record = df_record.iloc[index]
+ identity_index = ds_record.identity_index
+ # get identity meta
+ df_identity = self._metadata[types.Metadata.IDENTITY]
+ # future datasets may have multiple identities per image
+ ds_identities = df_identity.iloc[identity_index]
+ # get filepath and S3 url
+ fp_im = self.data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ s3_url = self.data_store_s3.face(ds_record.uuid)
+ image_record = ImageRecord(ds_record, fp_im, s3_url, ds_identities=ds_identities)
+ return image_record
+
+ def vector_to_record(self, vector_index):
'''Accumulates image and its metadata'''
df_face_vector = self._metadata[types.Metadata.FACE_VECTOR]
ds_face_vector = df_face_vector.iloc[vector_index]
@@ -115,18 +146,24 @@ class Dataset:
for match_idx in match_idxs:
# get the corresponding face vector row
- self.log.debug(f'find match index: {match_idx}')
- image_record = self.roi_idx_to_record(match_idx)
+ record_index = self._face_vector_idxs[match_idx]
+ self.log.debug(f'find match index: {match_idx} --> record_index: {record_index}')
+ image_record = self.index_to_record(record_index)
image_records.append(image_record)
return image_records
# ----------------------------------------------------------------------
# utilities
- def df_to_vec_list(self, df):
+ def df_vecs_to_dict(self, df):
# convert the DataFrame CSV to float list of vecs
return [list(map(float,x.vec.split(','))) for x in df.itertuples()]
+ def df_vec_idxs_to_dict(self, df):
+ # map each face vector row back to the index of its source file record
+ #return [x.roi_index for x in df.itertuples()]
+ return [x.image_index for x in df.itertuples()]
+
def similar(self, query_vec, n_results):
'''Finds most similar N indices of query face vector
:query_vec: (list) of 128 floating point numbers of face encoding
@@ -141,37 +178,35 @@ class Dataset:
class ImageRecord:
- def __init__(self, image_index, sha256, uuid, bbox, filepath, url):
- self.image_index = image_index
- self.sha256 = sha256
- self.uuid = uuid
- self.bbox = bbox
- self.filepath = filepath
+ def __init__(self, ds_record, fp, url, ds_rois=None, ds_identities=None):
+ # more metadata may be added here later
+ self.image_index = ds_record.index
+ self.sha256 = ds_record.sha256
+ self.uuid = ds_record.uuid
+ self.filepath = fp
self.url = url
- self._identity = None
+ self._identities = []
+ # image records contain ROIs
+ # ROIs are linked to identities
+
+ #self._identities = [Identity(x) for x in ds_identities]
  @property
  def identity(self):
- return self._identity
+ # return the primary identity, if any
+ return self._identities[0] if self._identities else None
- @identity.setter
- def identity(self, value):
- self._identity = value
-
def summarize(self):
'''Summarizes data for debugging'''
log = Logger.getLogger()
log.info(f'filepath: {self.filepath}')
log.info(f'sha256: {self.sha256}')
log.info(f'UUID: {self.uuid}')
- log.info(f'BBox: {self.bbox}')
- log.info(f's3 url: {self.url}')
- if self._identity:
- log.info(f'name: {self._identity.name}')
- log.info(f'age: {self._identity.age}')
- log.info(f'gender: {self._identity.gender}')
- log.info(f'nationality: {self._identity.nationality}')
- log.info(f'images: {self._identity.n_images}')
+ log.info(f'S3 url: {self.url}')
+ for identity in self._identities:
+ log.info(f'fullname: {identity.fullname}')
+ log.info(f'description: {identity.description}')
+ log.info(f'gender: {identity.gender}')
+ log.info(f'images: {identity.n_images}')
class Identity:
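Usage-wise, the monolithic load_metadata() is replaced by per-CSV loaders so commands only read what they need (the face-vector CSV alone can be several GB). A hedged sketch, mirroring how lookup.py uses it further down:

    # sketch; enum values assumed
    dataset = Dataset(types.DataStore.SSD, types.Dataset.LFW)
    dataset.load_records()       # file_record.csv: subdir, fn, ext, sha256, uuid, identity_index
    dataset.load_identities()    # identity.csv: name, description, gender, images
    image_record = dataset.index_to_record(0)
    image_record.summarize()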
diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py
index 7f9ed187..0c28b315 100644
--- a/megapixels/app/settings/app_cfg.py
+++ b/megapixels/app/settings/app_cfg.py
@@ -87,7 +87,7 @@ CKPT_ZERO_PADDING = 9
HASH_TREE_DEPTH = 3
HASH_BRANCH_SIZE = 3
-DLIB_FACEREC_JITTERS = 5 # number of face recognition jitters
+DLIB_FACEREC_JITTERS = 25 # number of face recognition jitters
DLIB_FACEREC_PADDING = 0.25 # default dlib
POSE_MINMAX_YAW = (-25,25)
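For context, DLIB_FACEREC_JITTERS is forwarded to dlib as num_jitters: the encoder runs on N randomly perturbed crops and averages the 128-d descriptors, so 25 gives a more stable vector at roughly 25x the encoding cost. A rough sketch of the assumed call site (dlib's compute_face_descriptor API is real; the model wiring here is illustrative, not this repo's RecognitionDLIB code):

    import dlib
    from app.settings import app_cfg as cfg

    facerec = dlib.face_recognition_model_v1('dlib_face_recognition_resnet_model_v1.dat')
    # im_rgb: RGB image array, shape: dlib 68-point landmarks (not shown here)
    vec = facerec.compute_face_descriptor(im_rgb, shape, cfg.DLIB_FACEREC_JITTERS)  # 128-d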
diff --git a/megapixels/app/settings/types.py b/megapixels/app/settings/types.py
index 685744aa..754be618 100644
--- a/megapixels/app/settings/types.py
+++ b/megapixels/app/settings/types.py
@@ -45,7 +45,7 @@ class LogLevel(Enum):
# --------------------------------------------------------------------
class Metadata(Enum):
- IDENTITY, FILEPATH, SHA256, UUID, FACE_VECTOR, FACE_POSE, FACE_ROI = range(7)
+ IDENTITY, FILE_RECORD, FACE_VECTOR, FACE_POSE, FACE_ROI = range(5)
class Dataset(Enum):
LFW, VGG_FACE2 = range(2)
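The consolidated Metadata enum keeps the one-to-one mapping onto metadata CSV filenames via DataStore.metadata(); FILEPATH, SHA256, and UUID collapse into the single FILE_RECORD table. Illustrative mapping (paths depend on the configured metadata dir):

    data_store.metadata(types.Metadata.FILE_RECORD)  # -> <dir_metadata>/file_record.csv
    data_store.metadata(types.Metadata.FACE_ROI)     # -> <dir_metadata>/face_roi.csv
    data_store.metadata(types.Metadata.FACE_VECTOR)  # -> <dir_metadata>/face_vector.csv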
diff --git a/megapixels/app/utils/file_utils.py b/megapixels/app/utils/file_utils.py
index 80239fe2..5c7b39d1 100644
--- a/megapixels/app/utils/file_utils.py
+++ b/megapixels/app/utils/file_utils.py
@@ -40,10 +40,16 @@ log = logging.getLogger(cfg.LOGGER_NAME)
# File I/O read/write little helpers
# ------------------------------------------
-def glob_multi(dir_in, exts):
+def glob_multi(dir_in, exts, recursive=False):
files = []
- for e in exts:
- files.append(glob(join(dir_in, '*.{}'.format(e))))
+ for ext in exts:
+ if recursive:
+ fp_glob = join(dir_in, '**/*.{}'.format(ext))
+ log.info(f'glob {fp_glob}')
+ files += glob(fp_glob, recursive=True)
+ else:
+ fp_glob = join(dir_in, '*.{}'.format(ext))
+ files += glob(fp_glob)
return files
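A small usage sketch of the new recursive mode (the '**' pattern only expands when recursive=True is forwarded to glob); the directory path is illustrative:

    fp_ims = file_utils.glob_multi('/data_store_ssd/datasets/people/lfw/media/original',
                                   ['jpg', 'png'], recursive=True)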
diff --git a/megapixels/commands/cv/cluster.py b/megapixels/commands/cv/cluster.py
index 94334133..419091a0 100644
--- a/megapixels/commands/cv/cluster.py
+++ b/megapixels/commands/cv/cluster.py
@@ -23,20 +23,20 @@ from app.utils.logger_utils import Logger
@click.pass_context
def cli(ctx, opt_data_store, opt_dataset, opt_metadata):
"""Display image info"""
-
- # cluster the embeddings
-print("[INFO] clustering...")
-clt = DBSCAN(metric="euclidean", n_jobs=args["jobs"])
-clt.fit(encodings)
-
-# determine the total number of unique faces found in the dataset
-labelIDs = np.unique(clt.labels_)
-numUniqueFaces = len(np.where(labelIDs > -1)[0])
-print("[INFO] # unique faces: {}".format(numUniqueFaces))
+
+ # cluster the embeddings
+ print("[INFO] clustering...")
+ clt = DBSCAN(metric="euclidean", n_jobs=args["jobs"])
+ clt.fit(encodings)
+
+ # determine the total number of unique faces found in the dataset
+ labelIDs = np.unique(clt.labels_)
+ numUniqueFaces = len(np.where(labelIDs > -1)[0])
+ print("[INFO] # unique faces: {}".format(numUniqueFaces))
# load and display image
im = cv.imread(fp_im)
cv.imshow('', im)
-
+
while True:
k = cv.waitKey(1) & 0xFF
if k == 27 or k == ord('q'): # ESC
diff --git a/megapixels/commands/cv/gen_pose.py b/megapixels/commands/cv/face_pose.py
index aefadb00..e7ffb7ac 100644
--- a/megapixels/commands/cv/gen_pose.py
+++ b/megapixels/commands/cv/face_pose.py
@@ -76,27 +76,26 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
face_landmarks = LandmarksDLIB()
# load filepath data
- fp_filepath = data_store.metadata(types.Metadata.FILEPATH)
- df_filepath = pd.read_csv(fp_filepath)
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
# load ROI data
fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
- df_roi = pd.read_csv(fp_roi)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
# slice if you want
if opt_slice:
df_roi = df_roi[opt_slice[0]:opt_slice[1]]
# group by image index (speedup if multiple faces per image)
- df_img_groups = df_roi.groupby('image_index')
+ df_img_groups = df_roi.groupby('record_index')
log.debug('processing {:,} groups'.format(len(df_img_groups)))
# store poses and convert to DataFrame
poses = []
# iterate
- for image_index, df_img_group in tqdm(df_img_groups):
+ for record_index, df_img_group in tqdm(df_img_groups):
# make fp
- ds_file = df_filepath.iloc[image_index]
- fp_im = data_store.face_image(ds_file.subdir, ds_file.fn, ds_file.ext)
- #fp_im = join(opt_dir_media, ds_file.subdir, '{}.{}'.format(ds_file.fn, ds_file.ext))
+ ds_record = df_record.iloc[record_index]
+ fp_im = data_store.face_image(ds_record.subdir, ds_record.fn, ds_record.ext)
im = cv.imread(fp_im)
# get bbox
x = df_img_group.x.values[0]
@@ -130,7 +129,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset,
break
# add image index and append to result CSV data
- pose_degrees['image_index'] = image_index
+ pose_degrees['record_index'] = record_index
poses.append(pose_degrees)
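The pose dict returned by the solver is assumed to carry Euler angles in degrees, which filter_by_pose.py later thresholds; one written row would then look roughly like (key names assumed, values made up):

    {'yaw': -3.2, 'pitch': 7.9, 'roll': 1.4, 'record_index': 841}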
diff --git a/megapixels/commands/cv/gen_rois.py b/megapixels/commands/cv/face_roi.py
index 20dd598a..d7248aee 100644
--- a/megapixels/commands/cv/gen_rois.py
+++ b/megapixels/commands/cv/face_roi.py
@@ -103,20 +103,19 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
# get list of files to process
- fp_in = data_store.metadata(types.Metadata.FILEPATH) if opt_fp_in is None else opt_fp_in
- df_files = pd.read_csv(fp_in).set_index('index')
+ fp_in = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_in is None else opt_fp_in
+ df_records = pd.read_csv(fp_in).set_index('index')
if opt_slice:
- df_files = df_files[opt_slice[0]:opt_slice[1]]
- log.debug('processing {:,} files'.format(len(df_files)))
+ df_records = df_records[opt_slice[0]:opt_slice[1]]
+ log.debug('processing {:,} files'.format(len(df_records)))
# filter out grayscale
color_filter = color_filters[opt_color_filter]
data = []
- for df_file in tqdm(df_files.itertuples(), total=len(df_files)):
- fp_im = data_store.face_image(str(df_file.subdir), str(df_file.fn), str(df_file.ext))
- #fp_im = join(opt_dir_media, str(df_file.subdir), f'{df_file.fn}.{df_file.ext}')
+ for df_record in tqdm(df_records.itertuples(), total=len(df_records)):
+ fp_im = data_store.face_image(str(df_record.subdir), str(df_record.fn), str(df_record.ext))
im = cv.imread(fp_im)
  # filter out color or grayscale images
@@ -139,7 +138,7 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
for bbox in bboxes:
roi = {
- 'image_index': int(df_file.Index),
+ 'record_index': int(df_record.Index),
'x': bbox.x,
'y': bbox.y,
'w': bbox.w,
@@ -169,4 +168,4 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
file_utils.mkdirs(fp_out)
df = pd.DataFrame.from_dict(data)
df.index.name = 'index'
- df.to_csv(opt_fp_out)
\ No newline at end of file
+ df.to_csv(fp_out)
\ No newline at end of file
diff --git a/megapixels/commands/cv/gen_face_vec.py b/megapixels/commands/cv/face_vector.py
index 83e1460d..203f73eb 100644
--- a/megapixels/commands/cv/gen_face_vec.py
+++ b/megapixels/commands/cv/face_vector.py
@@ -76,15 +76,17 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
facerec = face_recognition.RecognitionDLIB()
# load data
- df_file = pd.read_csv(data_store.metadata(types.Metadata.FILEPATH)).set_index('index')
- df_roi = pd.read_csv(data_store.metadata(types.Metadata.FACE_ROI)).set_index('index')
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+ fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
+ df_roi = pd.read_csv(fp_roi).set_index('index')
if opt_slice:
df_roi = df_roi[opt_slice[0]:opt_slice[1]]
# -------------------------------------------------
# process here
- df_img_groups = df_roi.groupby('image_index')
+ df_img_groups = df_roi.groupby('record_index')
log.debug('processing {:,} groups'.format(len(df_img_groups)))
vecs = []
@@ -92,9 +94,9 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
for image_index, df_img_group in tqdm(df_img_groups):
# make fp
roi_index = df_img_group.index.values[0]
- log.debug(f'roi_index: {roi_index}, image_index: {image_index}')
- ds_file = df_file.loc[roi_index] # locate image meta
- #ds_file = df_file.loc['index', image_index] # locate image meta
+ # log.debug(f'roi_index: {roi_index}, image_index: {image_index}')
+ ds_file = df_record.loc[roi_index] # locate image meta
+ #ds_file = df_record.loc['index', image_index] # locate image meta
fp_im = data_store.face_image(str(ds_file.subdir), str(ds_file.fn), str(ds_file.ext))
im = cv.imread(fp_im)
@@ -119,5 +121,5 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
# save date
df = pd.DataFrame.from_dict(vecs)
df.index.name = 'index'
- #file_utils.mkdirs(fp_out)
- #df.to_csv(fp_out)
\ No newline at end of file
+ file_utils.mkdirs(fp_out)
+ df.to_csv(fp_out)
\ No newline at end of file
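The vector CSV written here is what Dataset.load_face_vectors() parses back: each row is assumed to carry the source image/record index plus a 'vec' column of 128 comma-separated floats (column names taken from dataset.py's df_vecs_to_dict/df_vec_idxs_to_dict). A sketch of one appended row, continuing from the loop above; face_vec is a stand-in name for the dlib encoding:

    vec_str = ','.join(map(str, face_vec))        # 128-d encoding -> CSV-safe string
    vecs.append({'image_index': image_index, 'vec': vec_str})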
diff --git a/megapixels/commands/datasets/filter_by_pose.py b/megapixels/commands/datasets/filter_by_pose.py
index 6fdbef98..a588b18e 100644
--- a/megapixels/commands/datasets/filter_by_pose.py
+++ b/megapixels/commands/datasets/filter_by_pose.py
@@ -53,17 +53,11 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_yaw, opt_ro
fp_roi = data_store.metadata(types.Metadata.FACE_ROI)
df_roi = pd.read_csv(fp_roi).set_index('index')
# load filepath
- fp_filepath = data_store.metadata(types.Metadata.FILEPATH)
- df_filepath = pd.read_csv(fp_filepath).set_index('index')
- # load uuid
- fp_uuid= data_store.metadata(types.Metadata.UUID)
- df_uuid = pd.read_csv(fp_uuid).set_index('index')
- # load sha256 index
- fp_sha256 = data_store.metadata(types.Metadata.SHA256)
- df_sha256 = pd.read_csv(fp_sha256).set_index('index')
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
# debug
log.info('Processing {:,} rows'.format(len(df_pose)))
- n_rows = len(df_pose)
+ n_rows = len(df_record)
# filter out extreme poses
invalid_indices = []
@@ -74,28 +68,29 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_yaw, opt_ro
invalid_indices.append(ds_pose.Index) # unique file indexs
# filter out valid/invalid
- log.info(invalid_indices[:20])
+ log.info(f'first 20 invalid indices: {invalid_indices[:20]}')
log.info(f'Removing {len(invalid_indices)} invalid indices...')
- df_filepath = df_filepath.drop(df_pose.index[invalid_indices])
- df_sha256 = df_sha256.drop(df_pose.index[invalid_indices])
- df_uuid = df_uuid.drop(df_pose.index[invalid_indices])
- df_roi = df_roi.drop(df_pose.index[invalid_indices])
+ df_record = df_record.drop(df_record.index[invalid_indices])
+ df_roi = df_roi.drop(df_roi.index[invalid_indices])
df_pose = df_pose.drop(df_pose.index[invalid_indices])
- log.info(f'Removed {n_rows - len(df_pose)}')
+ log.info(f'Removed {n_rows - len(df_record)}')
# move file to make backup
dir_bkup = join(Path(fp_pose).parent, f'backup_{datetime.now():%Y%m%d_%M%S}')
file_utils.mkdirs(dir_bkup)
# move files to backup
- shutil.move(fp_filepath, join(dir_bkup, Path(fp_filepath).name))
- shutil.move(fp_sha256, join(dir_bkup, Path(fp_sha256).name))
- shutil.move(fp_uuid, join(dir_bkup, Path(fp_uuid).name))
+ shutil.move(fp_record, join(dir_bkup, Path(fp_record).name))
shutil.move(fp_roi, join(dir_bkup, Path(fp_roi).name))
shutil.move(fp_pose, join(dir_bkup, Path(fp_pose).name))
- # save filtered poses
- df_filepath.to_csv(fp_filepath)
- df_sha256.to_csv(fp_sha256)
- df_uuid.to_csv(fp_uuid)
+ # resave file records
+ df_record = df_record.reset_index(drop=True)
+ df_record.index.name = 'index'
+ df_record.to_csv(fp_record)
+ # resave ROI
+ df_roi = df_roi.reset_index(drop=True)
+ df_roi.index.name = 'index'
df_roi.to_csv(fp_roi)
+ # resave pose
+ df_pose = df_pose.reset_index(drop=True)
+ df_pose.index.name = 'index'
df_pose.to_csv(fp_pose)
-
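For reference, the invalid-index pass above keeps a face only when every angle sits inside its configured window; condensed, the predicate is assumed to look like the sketch below (POSE_MINMAX_PITCH/ROLL are assumed to exist alongside POSE_MINMAX_YAW in app_cfg; the thresholds may equally come from the command's --yaw/--roll options):

    def pose_ok(ds_pose):
        return (cfg.POSE_MINMAX_YAW[0]   <= ds_pose.yaw   <= cfg.POSE_MINMAX_YAW[1]
            and cfg.POSE_MINMAX_PITCH[0] <= ds_pose.pitch <= cfg.POSE_MINMAX_PITCH[1]
            and cfg.POSE_MINMAX_ROLL[0]  <= ds_pose.roll  <= cfg.POSE_MINMAX_ROLL[1])

    invalid_indices = [ds.Index for ds in df_pose.itertuples() if not pose_ok(ds)]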
diff --git a/megapixels/commands/datasets/gen_filepath.py b/megapixels/commands/datasets/gen_filepath.py
index e06fee6b..5db405c0 100644
--- a/megapixels/commands/datasets/gen_filepath.py
+++ b/megapixels/commands/datasets/gen_filepath.py
@@ -50,7 +50,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_slice,
from tqdm import tqdm
from glob import glob
- from app.models import DataStore
+ from app.models.data_store import DataStore
from app.utils import file_utils, im_utils
data_store = DataStore(opt_data_store, opt_dataset)
@@ -97,6 +97,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_slice,
file_utils.mkdirs(fp_out)
df_filepath = pd.DataFrame.from_dict(data)
df_filepath = df_filepath.sort_values(by=['subdir'], ascending=True)
- df_filepath = df_filepath.reset_index(drop=True)
+ df_filepath = df_filepath.reset_index()
df_filepath.index.name = 'index'
  df_filepath.to_csv(fp_out)
\ No newline at end of file
diff --git a/megapixels/commands/datasets/gen_sha256.py b/megapixels/commands/datasets/gen_sha256.py
deleted file mode 100644
index 1616eebf..00000000
--- a/megapixels/commands/datasets/gen_sha256.py
+++ /dev/null
@@ -1,152 +0,0 @@
-'''
-
-'''
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-from app.utils.logger_utils import Logger
-
-log = Logger.getLogger()
-
-identity_sources = ['subdir', 'subdir_head', 'subdir_tail']
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', default=None,
- help='Override enum input filename CSV')
-@click.option('-o', '--output', 'opt_fp_out', default=None,
- help='Override enum output filename CSV')
-@click.option('-m', '--media', 'opt_dir_media', default=None,
- help='Override enum media directory')
-@click.option('--data_store', 'opt_data_store',
- type=cfg.DataStoreVar,
- default=click_utils.get_default(types.DataStore.NAS),
- show_default=True,
- help=click_utils.show_help(types.Dataset))
-@click.option('--dataset', 'opt_dataset',
- type=cfg.DatasetVar,
- required=True,
- show_default=True,
- help=click_utils.show_help(types.Dataset))
-@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
- help='Slice list of files')
-@click.option('-t', '--threads', 'opt_threads', default=12,
- help='Number of threads')
-@click.option('-f', '--force', 'opt_force', is_flag=True,
- help='Force overwrite file')
-@click.option('--identity', 'opt_identity', default='subdir_tail', type=click.Choice(identity_sources),
- help='Identity source, blank for no identity')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media, opt_slice, opt_threads,
- opt_identity, opt_force):
- """Generates sha256/identity index CSV file"""
-
- import sys
- from glob import glob
- from os.path import join
- from pathlib import Path
- import time
- from multiprocessing.dummy import Pool as ThreadPool
- import random
-
- import pandas as pd
- from tqdm import tqdm
- from glob import glob
-
- from app.models import DataStore
- from app.utils import file_utils, im_utils
-
-
- # set data_store
- data_store = DataStore(opt_data_store, opt_dataset)
- # get filepath out
- fp_out = data_store.metadata(types.Metadata.SHA256) if opt_fp_out is None else opt_fp_out
- # exit if exists
- if not opt_force and Path(fp_out).exists():
- log.error('File exists. Use "-f / --force" to overwite')
- return
- # get filepath in
- fp_in = data_store.metadata(types.Metadata.FILEPATH)
- df_files = pd.read_csv(fp_in).set_index('index')
- # slice if you want
- if opt_slice:
- df_files = df_files[opt_slice[0]:opt_slice[1]]
-
- log.info('Processing {:,} images'.format(len(df_files)))
-
-
- # prepare list of images to multithread into sha256s
- dir_media = data_store.media_images_original() if opt_dir_media is None else opt_dir_media
- file_objs = []
- for ds_file in df_files.itertuples():
- fp_im = join(dir_media, str(ds_file.subdir), f"{ds_file.fn}.{ds_file.ext}")
- # find the image_index
- # append the subdir option, sort by this then increment by unique subdir
- file_obj = {'fp': fp_im, 'index': ds_file.Index}
- if opt_identity:
- subdirs = ds_file.subdir.split('/')
- if not len(subdirs) > 0:
- log.error(f'Could not split subdir: "{ds_file.subdir}. Try different option for "--identity"')
- log.error('exiting')
- return
- if opt_identity == 'subdir':
- subdir = subdirs[0]
- elif opt_identity == 'subdir_head':
- # use first part of subdir path
- subdir = subdirs[0]
- elif opt_identity == 'subdir_tail':
- # use last part of subdir path
- subdir = subdirs[-1]
- file_obj['identity_subdir'] = subdir
- file_objs.append(file_obj)
-
- # convert to thread pool
- pbar = tqdm(total=len(file_objs))
-
- def as_sha256(file_obj):
- pbar.update(1)
- file_obj['sha256'] = file_utils.sha256(file_obj['fp'])
- return file_obj
-
- # multithread pool
- pool_file_objs = []
- st = time.time()
- pool = ThreadPool(opt_threads)
- with tqdm(total=len(file_objs)) as pbar:
- pool_file_objs = pool.map(as_sha256, file_objs)
- pbar.close()
-
- # convert data to dict
- data = []
- for pool_file_obj in pool_file_objs:
- data.append( {
- 'sha256': pool_file_obj['sha256'],
- 'index': pool_file_obj['index'],
- 'identity_subdir': pool_file_obj.get('identity_subdir', ''),
- })
-
- # sort based on identity_subdir
- # save to CSV
- df_sha256 = pd.DataFrame.from_dict(data)
- # add new column for identity
- df_sha256['identity_index'] = [1] * len(df_sha256)
- df_sha256 = df_sha256.sort_values(by=['identity_subdir'], ascending=True)
- df_sha256_identity_groups = df_sha256.groupby('identity_subdir')
- for identity_index, df_sha256_identity_group_tuple in enumerate(df_sha256_identity_groups):
- identity_subdir, df_sha256_identity_group = df_sha256_identity_group_tuple
- for ds_sha256 in df_sha256_identity_group.itertuples():
- df_sha256.at[ds_sha256.Index, 'identity_index'] = identity_index
- # drop temp identity subdir column
- df_sha256 = df_sha256.drop('identity_subdir', axis=1)
- # write to CSV
- log.info(f'rows: {len(df_sha256)}')
- file_utils.mkdirs(fp_out)
- df_sha256.set_index('index')
- df_sha256 = df_sha256.sort_values(['index'], ascending=[True])
- df_sha256.to_csv(fp_out, index=False)
-
- # timing
- log.info(f'wrote file: {fp_out}')
- log.info('time: {:.2f}, theads: {}'.format(time.time() - st, opt_threads))
- \ No newline at end of file
diff --git a/megapixels/commands/datasets/gen_uuid.py b/megapixels/commands/datasets/gen_uuid.py
index 612c43ee..d7e7b52c 100644
--- a/megapixels/commands/datasets/gen_uuid.py
+++ b/megapixels/commands/datasets/gen_uuid.py
@@ -37,7 +37,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset, opt_force):
from tqdm import tqdm
import pandas as pd
- from app.models import DataStore
+ from app.models.data_store import DataStore
# set data_store
diff --git a/megapixels/commands/datasets/identity_meta_lfw.py b/megapixels/commands/datasets/identity_meta_lfw.py
new file mode 100644
index 00000000..45386b23
--- /dev/null
+++ b/megapixels/commands/datasets/identity_meta_lfw.py
@@ -0,0 +1,93 @@
+'''
+add identity from description using subdir
+'''
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Identity meta file')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--column', 'opt_identity_key', default='identity_key',
+ help='Match column')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_identity_key, opt_data_store, opt_force):
+  """Generates identity.csv for LFW from an identity meta CSV"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ log = Logger.getLogger()
+
+ # output file
+ opt_dataset = types.Dataset.LFW
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_out = data_store.metadata(types.Metadata.IDENTITY) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ log.debug(fp_out)
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # init dataset
+ # load file records
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+
+ # load identity meta
+ # this file is maybe prepared in a Jupyter notebook
+ # the "identity_key"
+ df_identity_meta = pd.read_csv(opt_fp_in).set_index('index')
+ # create a new file called 'identity.csv'
+ identities = []
+ # iterate records and get identity index where 'identity_key' matches
+ log.debug(type(df_record))
+ identity_indices = []
+ for record_idx, ds_record in tqdm(df_record.iterrows(), total=len(df_record)):
+ identity_value = ds_record[opt_identity_key]
+ identity_index = ds_record.identity_index
+ ds_identity_meta = df_identity_meta.loc[(df_identity_meta[opt_identity_key] == identity_value)]
+ if identity_index not in identity_indices:
+ identity_indices.append(identity_index)
+ identities.append({
+ 'description': ds_identity_meta.description.values[0],
+ 'name': ds_identity_meta.name.values[0],
+ 'images': ds_identity_meta.images.values[0],
+ 'gender': ds_identity_meta.gender.values[0],
+ })
+
+ # write to csv
+ df_identity = pd.DataFrame.from_dict(identities)
+ df_identity.index.name = 'index'
+ df_identity.to_csv(fp_out)
+ '''
+ index,name,name_orig,description,gender,images,image_index,identity_key
+ 0,A. J. Cook,AJ Cook,Canadian actress,f,1,0,AJ_Cook
+ '''
+
+
diff --git a/megapixels/commands/datasets/identity_meta_vgg_face2.py b/megapixels/commands/datasets/identity_meta_vgg_face2.py
new file mode 100644
index 00000000..85b6644d
--- /dev/null
+++ b/megapixels/commands/datasets/identity_meta_vgg_face2.py
@@ -0,0 +1,88 @@
+'''
+add identity from description using subdir
+'''
+import click
+
+from app.settings import types
+from app.models.dataset import Dataset
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', required=True,
+ help='Identity meta file')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_force):
+  """Generates identity.csv for VGG Face2 from an identity meta CSV"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import pandas as pd
+ import cv2 as cv
+ from tqdm import tqdm
+
+ from app.utils import file_utils, im_utils
+ from app.models.data_store import DataStore
+
+ log = Logger.getLogger()
+
+ # output file
+ opt_dataset = types.Dataset.VGG_FACE2
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_out = data_store.metadata(types.Metadata.IDENTITY) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ log.debug(fp_out)
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # init dataset
+ # load file records
+ identity_key = 'identity_key'
+ fp_record = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_record = pd.read_csv(fp_record).set_index('index')
+
+ # load identity meta
+ # this file is maybe prepared in a Jupyter notebook
+ # the "identity_key"
+ df_identity_meta = pd.read_csv(opt_fp_in).set_index('index')
+ # create a new file called 'identity.csv'
+ identities = []
+ # iterate records and get identity index where 'identity_key' matches
+ log.debug(type(df_record))
+ identity_indices = []
+ for ds_record in tqdm(df_record.itertuples(), total=len(df_record)):
+ identity_value = ds_record.identity_key
+ identity_index = ds_record.identity_index
+ ds_identity_meta = df_identity_meta.loc[(df_identity_meta[identity_key] == identity_value)]
+ if identity_index not in identity_indices:
+ identity_indices.append(identity_index)
+ identities.append({
+ 'description': ds_identity_meta.description.values[0],
+ 'name': ds_identity_meta.name.values[0],
+ 'images': ds_identity_meta.images.values[0],
+ 'gender': ds_identity_meta.gender.values[0],
+ })
+
+ # write to csv
+ df_identity = pd.DataFrame.from_dict(identities)
+ df_identity.index.name = 'index'
+ df_identity.to_csv(fp_out)
+
+
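The per-record loop above rescans df_identity_meta once per file record; since only one meta row per identity_index is needed, an equivalent shortcut (a hedged alternative sketch, not the command's actual code, continuing from the variables above) is to dedupe the records first and merge once:

    # one meta row per identity, ordered by identity_index
    df_first = df_record.drop_duplicates('identity_index').sort_values('identity_index')
    df_identity = df_first.merge(df_identity_meta, on='identity_key', how='left')
    df_identity = df_identity[['description', 'name', 'images', 'gender']].reset_index(drop=True)
    df_identity.index.name = 'index'
    df_identity.to_csv(fp_out)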
diff --git a/megapixels/commands/datasets/lookup.py b/megapixels/commands/datasets/lookup.py
index 5a2a171e..c1c66c19 100644
--- a/megapixels/commands/datasets/lookup.py
+++ b/megapixels/commands/datasets/lookup.py
@@ -13,7 +13,7 @@ log = Logger.getLogger()
help='Vector index to lookup')
@click.option('--data_store', 'opt_data_store',
type=cfg.DataStoreVar,
- default=click_utils.get_default(types.DataStore.NAS),
+ default=click_utils.get_default(types.DataStore.SSD),
show_default=True,
help=click_utils.show_help(types.Dataset))
@click.option('--dataset', 'opt_dataset',
@@ -41,11 +41,12 @@ def cli(ctx, opt_index, opt_data_store, opt_dataset):
log = Logger.getLogger()
# init dataset
dataset = Dataset(opt_data_store, opt_dataset)
+ #dataset.load_face_vectors()
+ dataset.load_records()
+ dataset.load_identities()
# set data store and load files
- dataset.load()
# find image records
- image_record = dataset.roi_idx_to_record(opt_index)
- # debug
+ image_record = dataset.index_to_record(opt_index)
image_record.summarize()
# load image
im = cv.imread(image_record.filepath)
diff --git a/megapixels/commands/datasets/records.py b/megapixels/commands/datasets/records.py
new file mode 100644
index 00000000..80de5040
--- /dev/null
+++ b/megapixels/commands/datasets/records.py
@@ -0,0 +1,159 @@
+'''
+
+'''
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+from app.utils.logger_utils import Logger
+
+log = Logger.getLogger()
+
+identity_sources = ['subdir', 'subdir_head', 'subdir_tail']
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('-m', '--media', 'opt_dir_media', default=None,
+ help='Override enum media directory')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
+ help='Slice list of files')
+@click.option('-t', '--threads', 'opt_threads', default=12,
+ help='Number of threads')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('--identity', 'opt_identity', default=None, type=click.Choice(identity_sources),
+ help='Identity source, blank for no identity')
+@click.option('--recursive/--no-recursive', 'opt_recursive', is_flag=True, default=False,
+ help='Use glob recursion (slower)')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_dataset, opt_data_store, opt_dir_media, opt_slice, opt_threads,
+ opt_identity, opt_force, opt_recursive):
+ """Generates sha256, uuid, and identity index CSV file"""
+
+ import sys
+ from glob import glob
+ from os.path import join
+ from pathlib import Path
+ import time
+ from multiprocessing.dummy import Pool as ThreadPool
+ import random
+ import uuid
+
+ import pandas as pd
+ from tqdm import tqdm
+ from glob import glob
+
+ from app.models.data_store import DataStore
+ from app.utils import file_utils, im_utils
+
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ # get filepath out
+ fp_out = data_store.metadata(types.Metadata.FILE_RECORD) if opt_fp_out is None else opt_fp_out
+ # exit if exists
+ if not opt_force and Path(fp_out).exists():
+    log.error('File exists. Use "-f / --force" to overwrite')
+ return
+
+ # ----------------------------------------------------------------
+ # glob files
+
+ fp_in = opt_fp_in if opt_fp_in is not None else data_store.media_images_original()
+ log.info(f'Globbing {fp_in}')
+ fp_ims = file_utils.glob_multi(fp_in, ['jpg', 'png'], recursive=opt_recursive)
+ # fail if none
+ if not fp_ims:
+ log.error('No images. Try with "--recursive"')
+ return
+ # slice to reduce
+ if opt_slice:
+ fp_ims = fp_ims[opt_slice[0]:opt_slice[1]]
+ log.info('Found {:,} images'.format(len(fp_ims)))
+
+
+ # ----------------------------------------------------------------
+ # multithread process into SHA256
+
+  def as_sha256(fp_im):
+    pbar.update(1)
+    return file_utils.sha256(fp_im)
+
+  # hash files in a thread pool
+  sha256s = []
+ pool = ThreadPool(opt_threads)
+ with tqdm(total=len(fp_ims)) as pbar:
+ sha256s = pool.map(as_sha256, fp_ims)
+ pbar.close()
+
+
+ # ----------------------------------------------------------------
+ # convert data to dict
+
+ data = []
+ for sha256, fp_im in zip(sha256s, fp_ims):
+ fpp_im = Path(fp_im)
+ subdir = str(fpp_im.parent.relative_to(fp_in))
+
+ if opt_identity:
+ subdirs = subdir.split('/')
+ if not len(subdirs) > 0:
+        log.error(f'Could not split subdir: "{subdir}". Try a different option for "--identity"')
+ log.error('exiting')
+ return
+ if opt_identity == 'subdir':
+ identity = subdirs[0] # use first/only part
+ elif opt_identity == 'subdir_head':
+ identity = subdirs[0] # use first part of subdir path
+ elif opt_identity == 'subdir_tail':
+ identity = subdirs[-1] # use last part of subdir path
+ else:
+ identity = ''
+
+ data.append({
+ 'subdir': subdir,
+ 'fn': fpp_im.stem,
+ 'ext': fpp_im.suffix.replace('.',''),
+ 'sha256': sha256,
+ 'uuid': uuid.uuid4(),
+ 'identity_key': identity
+ })
+
+ log.info(f'adding identity index using: "{opt_identity}". This may take a while...')
+ # convert dict to DataFrame
+ df_records = pd.DataFrame.from_dict(data)
+ # sort based on identity_key
+ df_records = df_records.sort_values(by=['identity_key'], ascending=True)
+ # add new column for identity
+ df_records['identity_index'] = [-1] * len(df_records)
+ # populate the identity_index
+ df_records_identity_groups = df_records.groupby('identity_key')
+ # enumerate groups to create identity indices
+ for identity_index, df_records_identity_group_tuple in enumerate(df_records_identity_groups):
+ identity_key, df_records_identity_group = df_records_identity_group_tuple
+ for ds_record in df_records_identity_group.itertuples():
+ df_records.at[ds_record.Index, 'identity_index'] = identity_index
+ # reset index after being sorted
+ df_records = df_records.reset_index(drop=True)
+ df_records.index.name = 'index' # reassign 'index' as primary key column
+ # write to CSV
+ file_utils.mkdirs(fp_out)
+ df_records.to_csv(fp_out)
+ # done
+  log.info(f'wrote rows: {len(df_records)} to {fp_out}')
\ No newline at end of file
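The identity_index assignment above is effectively a dense group enumeration over identity_key; a shorter equivalent (an alternative sketch, not the command's code, continuing from the DataFrame above) would be:

    df_records = df_records.sort_values('identity_key')
    df_records['identity_index'] = df_records.groupby('identity_key').ngroup()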
diff --git a/megapixels/commands/datasets/s3.py b/megapixels/commands/datasets/s3.py
deleted file mode 100644
index 7769896b..00000000
--- a/megapixels/commands/datasets/s3.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-
-s3_dirs = {'media': cfg.S3_MEDIA_ROOT, 'metadata': cfg.S3_METADATA_ROOT}
-
-@click.command()
-@click.option('-i', '--input', 'opt_fps_in', required=True, multiple=True,
- help='Input directory')
-@click.option('--name', 'opt_dataset_name', required=True,
- help='Dataset key (eg "lfw"')
-@click.option('-a', '--action', 'opt_action', type=click.Choice(['sync', 'put']), default='sync',
- help='S3 action')
-@click.option('-t', '--type', 'opt_type', type=click.Choice(s3_dirs.keys()), required=True,
- help='S3 location')
-@click.option('--dry-run', 'opt_dryrun', is_flag=True, default=False)
-@click.pass_context
-def cli(ctx, opt_fps_in, opt_dataset_name, opt_action, opt_type, opt_dryrun):
- """Syncs files with S3/spaces server"""
-
- from os.path import join
- from pathlib import Path
-
- from tqdm import tqdm
- import pandas as pd
- import subprocess
-
- from app.utils import logger_utils, file_utils
-
- # -------------------------------------------------
- # init here
-
- log = logger_utils.Logger.getLogger()
- for opt_fp_in in opt_fps_in:
- dir_dst = join(s3_dirs[opt_type], opt_dataset_name, '')
- if Path(opt_fp_in).is_dir():
- fp_src = join(opt_fp_in, '') # add trailing slashes
- else:
- fp_src = join(opt_fp_in)
- cmd = ['s3cmd', opt_action, fp_src, dir_dst, '-P', '--follow-symlinks']
- log.info(' '.join(cmd))
- if not opt_dryrun:
- subprocess.call(cmd)
-
- \ No newline at end of file
diff --git a/megapixels/commands/datasets/s3_sync.py b/megapixels/commands/datasets/s3_sync.py
new file mode 100644
index 00000000..3098d9be
--- /dev/null
+++ b/megapixels/commands/datasets/s3_sync.py
@@ -0,0 +1,57 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+s3_dirs = {'media': cfg.S3_MEDIA_URL, 'metadata': cfg.S3_METADATA_URL}
+
+@click.command()
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('-t', '--type', 'opt_type', type=click.Choice(s3_dirs.keys()), required=True,
+ help='S3 location')
+@click.option('--dry-run', 'opt_dryrun', is_flag=True, default=False)
+@click.pass_context
+def cli(ctx, opt_data_store, opt_dataset, opt_type, opt_dryrun):
+ """Syncs files with S3/spaces server"""
+
+ from os.path import join
+ from pathlib import Path
+
+ from tqdm import tqdm
+ import pandas as pd
+ import subprocess
+
+ from app.utils import logger_utils, file_utils
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ dataset_name = opt_dataset.name.lower()
+ if opt_type == 'media':
+ dir_src = join(data_store.uuid_dir(), '')
+ dir_dst = join(s3_dirs[opt_type], dataset_name, '')
+ elif opt_type == 'metadata':
+ dir_src = join(data_store.metadata_dir(), '')
+ dir_dst = join(s3_dirs[opt_type], dataset_name, '')
+
+ cmd = ['s3cmd', 'sync', dir_src, dir_dst, '-P', '--follow-symlinks']
+ log.info(' '.join(cmd))
+ if not opt_dryrun:
+ subprocess.call(cmd)
+
+ \ No newline at end of file
diff --git a/megapixels/commands/datasets/symlink.py b/megapixels/commands/datasets/symlink.py
deleted file mode 100644
index 70ec6c46..00000000
--- a/megapixels/commands/datasets/symlink.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import click
-
-from app.settings import types
-from app.utils import click_utils
-from app.settings import app_cfg as cfg
-
-@click.command()
-@click.option('-i', '--input', 'opt_fp_in', required=True,
- help='Input records CSV')
-@click.option('-m', '--media', 'opt_fp_media', required=True,
- help='Input media directory')
-@click.option('-o', '--output', 'opt_fp_out', required=True,
- help='Output directory')
-@click.pass_context
-def cli(ctx, opt_fp_in, opt_fp_media, opt_fp_out):
- """Symlinks images to new directory for S3"""
-
- import sys
- import os
- from os.path import join
- from pathlib import Path
-
- from tqdm import tqdm
- import pandas as pd
-
- from app.utils import logger_utils, file_utils
-
- # -------------------------------------------------
- # init here
-
- log = logger_utils.Logger.getLogger()
-
- df_records = pd.read_csv(opt_fp_in)
- nrows = len(df_records)
-
- file_utils.mkdirs(opt_fp_out)
-
- for record_id, row in tqdm(df_records.iterrows(), total=nrows):
- # make image path
- df = df_records.iloc[record_id]
- fpp_src = Path(join(opt_fp_media, df['subdir'], '{}.{}'.format(df['fn'], df['ext'])))
- fpp_dst = Path(join(opt_fp_out, '{}.{}'.format(df['uuid'], df['ext'])))
- fpp_dst.symlink_to(fpp_src)
-
- log.info('symlinked {:,} files'.format(nrows))
\ No newline at end of file
diff --git a/megapixels/commands/datasets/symlink_uuid.py b/megapixels/commands/datasets/symlink_uuid.py
new file mode 100644
index 00000000..7c5faa95
--- /dev/null
+++ b/megapixels/commands/datasets/symlink_uuid.py
@@ -0,0 +1,57 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None,
+ help='Override enum input filename CSV')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Override enum output filename CSV')
+@click.option('--data_store', 'opt_data_store',
+ type=cfg.DataStoreVar,
+ default=click_utils.get_default(types.DataStore.SSD),
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.option('--dataset', 'opt_dataset',
+ type=cfg.DatasetVar,
+ required=True,
+ show_default=True,
+ help=click_utils.show_help(types.Dataset))
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_data_store, opt_dataset):
+ """Symlinks images to new directory for S3"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+
+ from tqdm import tqdm
+ import pandas as pd
+
+ from app.utils import logger_utils, file_utils
+ from app.models.data_store import DataStore
+
+ # -------------------------------------------------
+ # init here
+
+ log = logger_utils.Logger.getLogger()
+
+ # set data_store
+ data_store = DataStore(opt_data_store, opt_dataset)
+ fp_records = data_store.metadata(types.Metadata.FILE_RECORD)
+ df_records = pd.read_csv(fp_records).set_index('index')
+ nrows = len(df_records)
+
+ dir_out = data_store.uuid_dir() if opt_fp_out is None else opt_fp_out
+ file_utils.mkdirs(dir_out)
+
+ for ds_record in tqdm(df_records.itertuples(), total=nrows):
+ # make image path
+ fp_src = data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ fp_dst = data_store.face_uuid(ds_record.uuid, ds_record.ext)
+ Path(fp_dst).symlink_to(Path(fp_src))
+
+  log.info('symlinked {:,} files'.format(nrows))
\ No newline at end of file
diff --git a/megapixels/commands/demo/face_search.py b/megapixels/commands/demo/face_search.py
index 08b2323d..0452cc9d 100644
--- a/megapixels/commands/demo/face_search.py
+++ b/megapixels/commands/demo/face_search.py
@@ -45,10 +45,9 @@ def cli(ctx, opt_fp_in, opt_data_store, opt_dataset, opt_gpu):
log = Logger.getLogger()
# init face detection
+ detector = face_detector.DetectorDLIBHOG()
# init face recognition
- detector = face_detector.DetectorDLIBHOG()
- # face recognition/vector
recognition = face_recognition.RecognitionDLIB(gpu=opt_gpu)
# load query image
diff --git a/megapixels/notebooks/_local_scratch.ipynb b/megapixels/notebooks/_local_scratch.ipynb
index 167b6ddd..cee17cba 100644
--- a/megapixels/notebooks/_local_scratch.ipynb
+++ b/megapixels/notebooks/_local_scratch.ipynb
@@ -1,161 +1,173 @@
{
"cells": [
{
- "cell_type": "code",
- "execution_count": 1,
+ "cell_type": "markdown",
"metadata": {},
- "outputs": [],
"source": [
- "import pandas as pd\n",
- "import cv2 as cv\n",
- "import numpy as np\n",
- "%matplotlib inline\n",
- "import matplotlib.pyplot as plt"
+ "# Scratch pad"
]
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
- "import sys\n",
"from glob import glob\n",
"from os.path import join\n",
"from pathlib import Path\n",
+ "import random\n",
+ "\n",
+ "import pandas as pd\n",
+ "import cv2 as cv\n",
+ "import numpy as np\n",
+ "%matplotlib inline\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "import sys\n",
"sys.path.append('/work/megapixels_dev/megapixels')\n",
"from app.models.bbox import BBox\n",
- "#from app.utils import im_utils\n",
- "import random"
+ "from app.utils import im_utils, file_utils"
]
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
- "dir_ims = '/data_store_ssd/apps/megapixels/datasets/umd_faces/faces/'"
+ "a= [1]"
]
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 6,
"metadata": {},
"outputs": [
{
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "0\n"
- ]
+ "data": {
+ "text/plain": [
+ "1"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
}
],
"source": [
- "fp_ims = glob(join(dir_ims, '*.png'))\n",
- "print(len(fp_ims))"
+ "a[-1]"
]
},
{
"cell_type": "code",
- "execution_count": 9,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Help on function choice in module random:\n",
- "\n",
- "choice(self, seq)\n",
- " Choose a random element from a non-empty sequence.\n",
- "\n"
- ]
- }
- ],
- "source": [
- "help(random.sample)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 33,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[1, 8, 0, 6, 3] True\n"
- ]
- }
- ],
- "source": [
- "a = list(range(0,10))\n",
- "b = random.sample(a, 5)\n",
- "print(b, len(set(b))==5)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
+ "execution_count": 32,
"metadata": {},
"outputs": [],
"source": [
- "from random import randint\n",
- "imu"
+ "fp_filepath = '/data_store_ssd/datasets/people/lfw/metadata/filepath.csv'\n",
+ "df_filepath = pd.read_csv(fp_filepath)"
]
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 48,
"metadata": {},
"outputs": [],
"source": [
- "import face_alignment\n",
- "from skimage import io\n",
- "\n",
- "fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, flip_input=False, device='cuda')"
+ "image_index = 12467"
]
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 55,
"metadata": {},
- "outputs": [],
- "source": [
- "fp_im = np.random.choice(fp_ims)\n",
- "im = io.imread(fp_im)\n",
- "preds = fa.get_landmarks(im)\n",
- "print(preds[0])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "12474\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "index 12851\n",
+ "ext jpg\n",
+ "fn Vladimir_Putin_0029\n",
+ "subdir Vladimir_Putin\n",
+ "Name: 12474, dtype: object"
+ ]
+ },
+ "execution_count": 55,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
- "import json"
+ "image_index += 1\n",
+ "print(image_index)\n",
+ "df_filepath.iloc[image_index]"
]
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 56,
"metadata": {},
"outputs": [],
"source": [
- "print(len(preds[0]))\n"
+ "import imutils"
]
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 57,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Help on function build_montages in module imutils.convenience:\n",
+ "\n",
+ "build_montages(image_list, image_shape, montage_shape)\n",
+ " ---------------------------------------------------------------------------------------------\n",
+ " author: Kyle Hounslow\n",
+ " ---------------------------------------------------------------------------------------------\n",
+ " Converts a list of single images into a list of 'montage' images of specified rows and columns.\n",
+ " A new montage image is started once rows and columns of montage image is filled.\n",
+ " Empty space of incomplete montage images are filled with black pixels\n",
+ " ---------------------------------------------------------------------------------------------\n",
+ " :param image_list: python list of input images\n",
+ " :param image_shape: tuple, size each image will be resized to for display (width, height)\n",
+ " :param montage_shape: tuple, shape of image montage (width, height)\n",
+ " :return: list of montage images in numpy array format\n",
+ " ---------------------------------------------------------------------------------------------\n",
+ " \n",
+ " example usage:\n",
+ " \n",
+ " # load single image\n",
+ " img = cv2.imread('lena.jpg')\n",
+ " # duplicate image 25 times\n",
+ " num_imgs = 25\n",
+ " img_list = []\n",
+ " for i in xrange(num_imgs):\n",
+ " img_list.append(img)\n",
+ " # convert image list into a montage of 256x256 images tiled in a 5x5 montage\n",
+ " montages = make_montages_of_images(img_list, (256, 256), (5, 5))\n",
+ " # iterate through montages and display\n",
+ " for montage in montages:\n",
+ " cv2.imshow('montage image', montage)\n",
+ " cv2.waitKey(0)\n",
+ " \n",
+ " ----------------------------------------------------------------------------------------------\n",
+ "\n"
+ ]
+ }
+ ],
"source": [
- "with open('test.json', 'w') as fp:\n",
- " json.dump(preds[0].tolist(), fp)"
+ "help(imutils.build_montages)"
]
},
{
@@ -182,7 +194,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.6.5"
+ "version": "3.6.6"
}
},
"nbformat": 4,
diff --git a/megapixels/notebooks/datasets/lfw/lfw_make_identity_csv.ipynb b/megapixels/notebooks/datasets/lfw/lfw_make_identity_csv.ipynb
new file mode 100644
index 00000000..039614f0
--- /dev/null
+++ b/megapixels/notebooks/datasets/lfw/lfw_make_identity_csv.ipynb
@@ -0,0 +1,510 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Add identity ID to index"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from os.path import join\n",
+ "from pathlib import Path\n",
+ "import difflib\n",
+ "\n",
+ "from tqdm import tqdm_notebook as tqdm\n",
+ "import pandas as pd\n",
+ "import numpy as np"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# names\n",
+ "DATA_STORE = '/data_store_ssd/'\n",
+ "dir_dataset = 'datasets/people/lfw/metadata'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# split records into index and uuids\n",
+ "fp_identity_in = join(DATA_STORE, dir_dataset, 'identities_old.csv')\n",
+ "fp_identity_out = join(DATA_STORE, dir_dataset, 'identity_lookup.csv')\n",
+ "\n",
+ "df_identity = pd.read_csv(fp_identity_in).set_index('index')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<div>\n",
+ "<style scoped>\n",
+ " .dataframe tbody tr th:only-of-type {\n",
+ " vertical-align: middle;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead th {\n",
+ " text-align: right;\n",
+ " }\n",
+ "</style>\n",
+ "<table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr style=\"text-align: right;\">\n",
+ " <th></th>\n",
+ " <th>name</th>\n",
+ " <th>name_orig</th>\n",
+ " <th>description</th>\n",
+ " <th>gender</th>\n",
+ " <th>images</th>\n",
+ " <th>image_index</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>index</th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th>0</th>\n",
+ " <td>A. J. Cook</td>\n",
+ " <td>AJ Cook</td>\n",
+ " <td>Canadian actress</td>\n",
+ " <td>f</td>\n",
+ " <td>1</td>\n",
+ " <td>0</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>1</th>\n",
+ " <td>AJ Lamas</td>\n",
+ " <td>AJ Lamas</td>\n",
+ " <td>American actor</td>\n",
+ " <td>m</td>\n",
+ " <td>1</td>\n",
+ " <td>1</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2</th>\n",
+ " <td>Aaron Eckhart</td>\n",
+ " <td>Aaron Eckhart</td>\n",
+ " <td>American actor</td>\n",
+ " <td>m</td>\n",
+ " <td>1</td>\n",
+ " <td>2</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>3</th>\n",
+ " <td>Aaron Guiel</td>\n",
+ " <td>Aaron Guiel</td>\n",
+ " <td>Professional baseball player</td>\n",
+ " <td>m</td>\n",
+ " <td>1</td>\n",
+ " <td>3</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>4</th>\n",
+ " <td>Aaron Patterson</td>\n",
+ " <td>Aaron Patterson</td>\n",
+ " <td>Author</td>\n",
+ " <td>m</td>\n",
+ " <td>1</td>\n",
+ " <td>4</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</div>"
+ ],
+ "text/plain": [
+ " name name_orig description gender \\\n",
+ "index \n",
+ "0 A. J. Cook AJ Cook Canadian actress f \n",
+ "1 AJ Lamas AJ Lamas American actor m \n",
+ "2 Aaron Eckhart Aaron Eckhart American actor m \n",
+ "3 Aaron Guiel Aaron Guiel Professional baseball player m \n",
+ "4 Aaron Patterson Aaron Patterson Author m \n",
+ "\n",
+ " images image_index \n",
+ "index \n",
+ "0 1 0 \n",
+ "1 1 1 \n",
+ "2 1 2 \n",
+ "3 1 3 \n",
+ "4 1 4 "
+ ]
+ },
+ "execution_count": 24,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_identity.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<div>\n",
+ "<style scoped>\n",
+ " .dataframe tbody tr th:only-of-type {\n",
+ " vertical-align: middle;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead th {\n",
+ " text-align: right;\n",
+ " }\n",
+ "</style>\n",
+ "<table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr style=\"text-align: right;\">\n",
+ " <th></th>\n",
+ " <th>name</th>\n",
+ " <th>name_orig</th>\n",
+ " <th>description</th>\n",
+ " <th>gender</th>\n",
+ " <th>images</th>\n",
+ " <th>image_index</th>\n",
+ " <th>subdir</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>index</th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th>0</th>\n",
+ " <td>A. J. Cook</td>\n",
+ " <td>AJ Cook</td>\n",
+ " <td>Canadian actress</td>\n",
+ " <td>f</td>\n",
+ " <td>1</td>\n",
+ " <td>0</td>\n",
+ " <td></td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>1</th>\n",
+ " <td>AJ Lamas</td>\n",
+ " <td>AJ Lamas</td>\n",
+ " <td>American actor</td>\n",
+ " <td>m</td>\n",
+ " <td>1</td>\n",
+ " <td>1</td>\n",
+ " <td></td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</div>"
+ ],
+ "text/plain": [
+ " name name_orig description gender images image_index \\\n",
+ "index \n",
+ "0 A. J. Cook AJ Cook Canadian actress f 1 0 \n",
+ "1 AJ Lamas AJ Lamas American actor m 1 1 \n",
+ "\n",
+ " subdir \n",
+ "index \n",
+ "0 \n",
+ "1 "
+ ]
+ },
+ "execution_count": 25,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# associate each file with an identity\n",
+ "df_identity['subdir'] = [''] * len(df_identity)\n",
+ "df_identity.head(2)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "ece5c11b90954b25b1f1e28fc2fe6b55",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "HBox(children=(IntProgress(value=0, max=5749), HTML(value='')))"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "for row in tqdm(df_identity.itertuples(), total=len(df_identity)):\n",
+ " name = row.name_orig\n",
+ " subdir = name.replace(' ','_')\n",
+ " df_identity.at[row.Index, 'subdir'] = subdir"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<div>\n",
+ "<style scoped>\n",
+ " .dataframe tbody tr th:only-of-type {\n",
+ " vertical-align: middle;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead th {\n",
+ " text-align: right;\n",
+ " }\n",
+ "</style>\n",
+ "<table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr style=\"text-align: right;\">\n",
+ " <th></th>\n",
+ " <th>name</th>\n",
+ " <th>name_orig</th>\n",
+ " <th>description</th>\n",
+ " <th>gender</th>\n",
+ " <th>images</th>\n",
+ " <th>image_index</th>\n",
+ " <th>subdir</th>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>index</th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " <th></th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th>0</th>\n",
+ " <td>A. J. Cook</td>\n",
+ " <td>AJ Cook</td>\n",
+ " <td>Canadian actress</td>\n",
+ " <td>f</td>\n",
+ " <td>1</td>\n",
+ " <td>0</td>\n",
+ " <td>AJ_Cook</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>1</th>\n",
+ " <td>AJ Lamas</td>\n",
+ " <td>AJ Lamas</td>\n",
+ " <td>American actor</td>\n",
+ " <td>m</td>\n",
+ " <td>1</td>\n",
+ " <td>1</td>\n",
+ " <td>AJ_Lamas</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2</th>\n",
+ " <td>Aaron Eckhart</td>\n",
+ " <td>Aaron Eckhart</td>\n",
+ " <td>American actor</td>\n",
+ " <td>m</td>\n",
+ " <td>1</td>\n",
+ " <td>2</td>\n",
+ " <td>Aaron_Eckhart</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>3</th>\n",
+ " <td>Aaron Guiel</td>\n",
+ " <td>Aaron Guiel</td>\n",
+ " <td>Professional baseball player</td>\n",
+ " <td>m</td>\n",
+ " <td>1</td>\n",
+ " <td>3</td>\n",
+ " <td>Aaron_Guiel</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>4</th>\n",
+ " <td>Aaron Patterson</td>\n",
+ " <td>Aaron Patterson</td>\n",
+ " <td>Author</td>\n",
+ " <td>m</td>\n",
+ " <td>1</td>\n",
+ " <td>4</td>\n",
+ " <td>Aaron_Patterson</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</div>"
+ ],
+ "text/plain": [
+ " name name_orig description gender \\\n",
+ "index \n",
+ "0 A. J. Cook AJ Cook Canadian actress f \n",
+ "1 AJ Lamas AJ Lamas American actor m \n",
+ "2 Aaron Eckhart Aaron Eckhart American actor m \n",
+ "3 Aaron Guiel Aaron Guiel Professional baseball player m \n",
+ "4 Aaron Patterson Aaron Patterson Author m \n",
+ "\n",
+ " images image_index subdir \n",
+ "index \n",
+ "0 1 0 AJ_Cook \n",
+ "1 1 1 AJ_Lamas \n",
+ "2 1 2 Aaron_Eckhart \n",
+ "3 1 3 Aaron_Guiel \n",
+ "4 1 4 Aaron_Patterson "
+ ]
+ },
+ "execution_count": 27,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df_identity.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_identity.to_csv(fp_identity_out)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 138,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# make a clean index separate from files"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 145,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'AJ Lamas'"
+ ]
+ },
+ "execution_count": 145,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "#df_identies = pd.read_csv('identities.csv')\n",
+ "df_identities.iloc[1]['name']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 149,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "1 2 3 4\n"
+ ]
+ }
+ ],
+ "source": [
+ "a = [1,2,3,4]\n",
+ "\n",
+ "print(*a)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python [conda env:megapixels]",
+ "language": "python",
+ "name": "conda-env-megapixels-py"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.6"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
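The new notebook above builds the subdir column with a tqdm loop over itertuples. The same transformation can be written more compactly; the sketch below follows the notebook's paths and column names but is a condensed illustration, not part of the commit.

from os.path import join

import pandas as pd

DATA_STORE = '/data_store_ssd/'
dir_dataset = 'datasets/people/lfw/metadata'

fp_identity_in = join(DATA_STORE, dir_dataset, 'identities_old.csv')
fp_identity_out = join(DATA_STORE, dir_dataset, 'identity_lookup.csv')

df_identity = pd.read_csv(fp_identity_in).set_index('index')

# LFW names each person's image directory after the original name with
# spaces replaced by underscores, e.g. 'AJ Cook' -> 'AJ_Cook'.
df_identity['subdir'] = df_identity['name_orig'].str.replace(' ', '_')

df_identity.to_csv(fp_identity_out)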
diff --git a/megapixels/notebooks/examples/face_recognition_demo.ipynb b/megapixels/notebooks/examples/face_recognition_demo.ipynb
index 68c5f3b6..804c63b6 100644
--- a/megapixels/notebooks/examples/face_recognition_demo.ipynb
+++ b/megapixels/notebooks/examples/face_recognition_demo.ipynb
@@ -402,7 +402,9 @@
"execution_count": null,
"metadata": {},
"outputs": [],
- "source": []
+ "source": [
+ "import imutils"
+ ]
},
{
"cell_type": "code",