authorJules Laplace <julescarbon@gmail.com>2018-12-17 20:20:04 +0100
committerJules Laplace <julescarbon@gmail.com>2018-12-17 20:20:04 +0100
commit6626e3086ca9c5ce2317f437aae94afacd6f1360 (patch)
treeb6a35419eacd6f3853f1042a9c2ceb234bada0d6 /megapixels/app/models
parente67871d26f2e73861187e86110e240dd7718ea51 (diff)
parentc7e73f613fc5189c0adeda9fd693cb6aca3d4247 (diff)
Merge branch 'master' of github.com:adamhrv/megapixels_dev
Diffstat (limited to 'megapixels/app/models')
-rw-r--r--megapixels/app/models/data_store.py67
-rw-r--r--megapixels/app/models/dataset.py148
2 files changed, 163 insertions, 52 deletions
diff --git a/megapixels/app/models/data_store.py b/megapixels/app/models/data_store.py
new file mode 100644
index 00000000..244aba60
--- /dev/null
+++ b/megapixels/app/models/data_store.py
@@ -0,0 +1,67 @@
+import os
+from os.path import join
+import logging
+
+from app.settings import app_cfg as cfg
+from app.settings import types
+
+
+# -------------------------------------------------------------------------
+# Metadata and media files
+# -------------------------------------------------------------------------
+
+class DataStore:
+ # local data store
+ def __init__(self, opt_data_store, opt_dataset):
+ self.data_store = f'/data_store_{opt_data_store.name.lower()}'
+ self.dir_dataset = join(self.data_store, 'datasets', cfg.DIR_PEOPLE, opt_dataset.name.lower())
+ self.dir_media = join(self.dir_dataset, 'media')
+ self.dir_metadata = join(self.dir_dataset, 'metadata')
+
+ def metadata(self, enum_type):
+ return join(self.dir_metadata, f'{enum_type.name.lower()}.csv')
+
+ def metadata_dir(self):
+ # directory containing the metadata CSVs
+ return self.dir_metadata
+
+ def media_images_original(self):
+ return join(self.dir_media, 'original')
+
+ def face(self, subdir, fn, ext):
+ return join(self.dir_media, 'original', subdir, f'{fn}.{ext}')
+
+ def face_crop(self, subdir, fn, ext):
+ return join(self.dir_media, 'cropped', subdir, f'{fn}.{ext}')
+
+ def face_uuid(self, uuid, ext):
+ return join(self.dir_media, 'uuid', f'{uuid}.{ext}')
+
+ def face_crop_uuid(self, uuid, ext):
+ # NB: currently resolves to the same path as face_uuid
+ return join(self.dir_media, 'uuid', f'{uuid}.{ext}')
+
+ def uuid_dir(self):
+ return join(self.dir_media, 'uuid')
+
+
+class DataStoreS3:
+ # S3 server
+ def __init__(self, opt_dataset):
+ self._dir_media = join(cfg.S3_HTTP_MEDIA_URL, opt_dataset.name.lower())
+ self._dir_metadata = join(cfg.S3_HTTP_METADATA_URL, opt_dataset.name.lower())
+
+ def metadata(self, opt_metadata_type, ext='csv'):
+ return join(self._dir_metadata, f'{opt_metadata_type.name.lower()}.{ext}')
+
+ def face(self, opt_uuid, ext='jpg'):
+ #return join(self._dir_media, 'original', f'{opt_uuid}.{ext}')
+ return join(self._dir_media, f'{opt_uuid}.{ext}')
+
+ def face_crop(self, opt_uuid, ext='jpg'):
+ # not currently using?
+ return join(self._dir_media, 'cropped', f'{opt_uuid}.{ext}')
+
+
+
+# -------------------------------------------------------------------------
+# Models
+# ------------------------------------------------------------------------- \ No newline at end of file
diff --git a/megapixels/app/models/dataset.py b/megapixels/app/models/dataset.py
index 11d568a5..35e10465 100644
--- a/megapixels/app/models/dataset.py
+++ b/megapixels/app/models/dataset.py
@@ -2,6 +2,7 @@
Dataset model: container for all CSVs about a dataset
"""
import os
+import sys
from os.path import join
from pathlib import Path
import logging
@@ -12,7 +13,8 @@ import numpy as np
from app.settings import app_cfg as cfg
from app.settings import types
from app.models.bbox import BBox
-from app.utils import file_utils, im_utils, path_utils
+from app.utils import file_utils, im_utils
+from app.models.data_store import DataStore, DataStoreS3
from app.utils.logger_utils import Logger
# -------------------------------------------------------------------------
@@ -21,36 +23,70 @@ from app.utils.logger_utils import Logger
class Dataset:
- def __init__(self, opt_dataset_type, opt_data_store=types.DataStore.NAS):
+ def __init__(self, opt_data_store, opt_dataset_type):
self._dataset_type = opt_dataset_type # enum type
self.log = Logger.getLogger()
self._metadata = {}
self._face_vectors = []
self._nullframe = pd.DataFrame() # empty placeholder
- self.data_store = path_utils.DataStore(opt_data_store, self._dataset_type)
- self.data_store_s3 = path_utils.DataStoreS3(self._dataset_type)
+ self.data_store = DataStore(opt_data_store, self._dataset_type)
+ self.data_store_s3 = DataStoreS3(self._dataset_type)
- def load(self, opt_data_store):
- '''Loads all CSV files into (dict) of DataFrames'''
- for metadata_type in types.Metadata:
- self.log.info(f'load metadata: {metadata_type}')
- fp_csv = self.data_store.metadata(metadata_type)
- self.log.info(f'loading: {fp_csv}')
- if Path(fp_csv).is_file():
- self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
- if metadata_type == types.Metadata.FACE_VECTOR:
- # convert DataFrame to list of floats
- self._face_vecs = self.df_to_vec_list(self._metadata[metadata_type])
- self._metadata[metadata_type].drop('vec', axis=1, inplace=True)
- else:
- self.log.error('File not found: {fp_csv}. Replaced with empty DataFrame')
- self._metadata[metadata_type] = self._nullframe
- self.log.info('finished loading')
+ def load_face_vectors(self):
+ metadata_type = types.Metadata.FACE_VECTOR
+ fp_csv = self.data_store.metadata(metadata_type)
+ self.log.info(f'loading: {fp_csv}')
+ if Path(fp_csv).is_file():
+ self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
+ # convert DataFrame to list of floats
+ self._face_vectors = self.df_vecs_to_list(self._metadata[metadata_type])
+ self._face_vector_idxs = self.df_vec_idxs_to_list(self._metadata[metadata_type])
+ self.log.info(f'built face vector list: {len(self._face_vectors)}')
+ # remove the face vector column, it can be several GB of memory
+ self._metadata[metadata_type].drop('vec', axis=1, inplace=True)
+ else:
+ self.log.error(f'File not found: {fp_csv}. Exiting.')
+ sys.exit(1)
+
+ def load_records(self):
+ metadata_type = types.Metadata.FILE_RECORD
+ fp_csv = self.data_store.metadata(metadata_type)
+ self.log.info(f'loading: {fp_csv}')
+ if Path(fp_csv).is_file():
+ self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
+ else:
+ self.log.error(f'File not found: {fp_csv}. Exiting.')
+ sys.exit(1)
+
+ def load_identities(self):
+ metadata_type = types.Metadata.IDENTITY
+ fp_csv = self.data_store.metadata(metadata_type)
+ self.log.info(f'loading: {fp_csv}')
+ if Path(fp_csv).is_file():
+ self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
+ else:
+ self.log.error(f'File not found: {fp_csv}. Exiting.')
+ sys.exit(1)
def metadata(self, opt_metadata_type):
- return self._metadata.get(opt_metadata_type, self._nullframe)
+ return self._metadata.get(opt_metadata_type, None)
- def roi_idx_to_record(self, vector_index):
+ def index_to_record(self, index):
+ # get record meta
+ df_record = self._metadata[types.Metadata.FILE_RECORD]
+ ds_record = df_record.iloc[index]
+ identity_index = ds_record.identity_index
+ # get identity meta
+ df_identity = self._metadata[types.Metadata.IDENTITY]
+ # future datasets can have multiple identities per image
+ ds_identities = df_identity.iloc[identity_index]
+ # get filepath and S3 url
+ fp_im = self.data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+ s3_url = self.data_store_s3.face(ds_record.uuid)
+ image_record = ImageRecord(ds_record, fp_im, s3_url, ds_identities=ds_identities)
+ return image_record
+
+ def vector_to_record(self, vector_index):
'''Accumulates image and its metadata'''
df_face_vector = self._metadata[types.Metadata.FACE_VECTOR]
ds_face_vector = df_face_vector.iloc[vector_index]
@@ -80,7 +116,7 @@ class Dataset:
image_record = ImageRecord(image_index, sha256, uuid, bbox, fp_im, fp_url)
# now get the identity index (if available)
identity_index = ds_sha256.identity_index
- if identity_index:
+ if identity_index > -1:
# then use the identity index to get the identity meta
df_identity = df_filepath = self._metadata[types.Metadata.IDENTITY]
ds_identity = df_identity.iloc[identity_index]
@@ -95,27 +131,38 @@ class Dataset:
identity = Identity(identity_index, name=name, desc=desc, gender=gender, n_images=n_images,
url=url, age=age, nationality=nationality)
- image_record.identity = identity
+ image_record._identities.append(identity)
+ else:
+ self.log.info(f'no identity index: {ds_sha256}')
return image_record
- def matches(self, query_vec, n_results=5, threshold=0.5):
+ def find_matches(self, query_vec, n_results=5, threshold=0.6):
image_records = [] # list of image matches w/identity if available
# find most similar feature vectors indexes
- match_idxs = self.similar(query_vec, n_results, threshold)
+ #match_idxs = self.similar(query_vec, n_results, threshold)
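+ # brute-force match: Euclidean (L2) distance from the query vector to
+ # every stored face vector, then argpartition to pick the n_results
+ # smallest distances in O(n) (indices are unsorted; threshold is unused)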
+ sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
+ match_idxs = np.argpartition(sim_scores, n_results)[:n_results]
+
for match_idx in match_idxs:
# get the corresponding face vector row
- image_record = self.roi_idx_to_record(match_idx)
- results.append(image_record)
+ image_index = self._face_vector_idxs[match_idx]
+ self.log.debug(f'find match index: {match_idx} --> image_index: {image_index}')
+ image_record = self.index_to_record(image_index)
+ image_records.append(image_record)
return image_records
# ----------------------------------------------------------------------
# utilities
- def df_to_vec_list(self, df):
+ def df_vecs_to_list(self, df):
# convert the DataFrame CSV to float list of vecs
- vecs = [list(map(float,x.vec.split(','))) for x in df.itertuples()]
- return vecs
+ return [list(map(float,x.vec.split(','))) for x in df.itertuples()]
+
+ def df_vec_idxs_to_list(self, df):
+ # extract the image index for each face vector row
+ #return [x.roi_index for x in df.itertuples()]
+ return [x.image_index for x in df.itertuples()]
def similar(self, query_vec, n_results):
'''Finds most similar N indices of query face vector
@@ -124,45 +171,42 @@ class Dataset:
:returns (list) of (int) indices
'''
# uses np.linalg based on the ageitgey/face_recognition code
vecs_sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
top_idxs = np.argpartition(vecs_sim_scores, n_results)[:n_results]
return top_idxs
class ImageRecord:
- def __init__(self, image_index, sha256, uuid, bbox, filepath, url):
- self.image_index = image_index
- self.sha256 = sha256
- self.uuid = uuid
- self.bbox = bbox
- self.filepath = filepath
+ def __init__(self, ds_record, fp, url, ds_rois=None, ds_identities=None):
+ # more record meta may be added here later
+ self.image_index = ds_record.name # row label; the df is indexed on 'index'
+ self.sha256 = ds_record.sha256
+ self.uuid = ds_record.uuid
+ self.filepath = fp
self.url = url
- self._identity = None
+ self._identities = []
+ # image records contain ROIs
+ # ROIs are linked to identities
+
+ #self._identities = [Identity(x) for x in ds_identities]
- @property
- def identity(self):
- return self._identity
+ def identity(self, index):
+ return self._identities[index]
- @identity.setter
- def identity(self, value):
- self._identity = value
-
def summarize(self):
'''Summarizes data for debugging'''
log = Logger.getLogger()
log.info(f'filepath: {self.filepath}')
log.info(f'sha256: {self.sha256}')
log.info(f'UUID: {self.uuid}')
- log.info(f'BBox: {self.bbox}')
- log.info(f's3 url: {self.url}')
- if self._identity:
- log.info(f'name: {self._identity.name}')
- log.info(f'age: {self._identity.age}')
- log.info(f'gender: {self._identity.gender}')
- log.info(f'nationality: {self._identity.nationality}')
- log.info(f'images: {self._identity.n_images}')
+ log.info(f'S3 url: {self.url}')
+ for identity in self._identities:
+ log.info(f'name: {identity.name}')
+ log.info(f'desc: {identity.desc}')
+ log.info(f'gender: {identity.gender}')
+ log.info(f'images: {identity.n_images}')
class Identity: