| author | Adam Harvey <adam@ahprojects.com> | 2018-12-23 01:37:03 +0100 |
|---|---|---|
| committer | Adam Harvey <adam@ahprojects.com> | 2018-12-23 01:37:03 +0100 |
| commit | 4452e02e8b04f3476273574a875bb60cfbb4568b | |
| tree | 3ffa44f9621b736250a8b94da14a187dc785c2fe /megapixels/app/models/dataset.py | |
| parent | 2a65f7a157bd4bace970cef73529867b0e0a374d | |
| parent | 5340bee951c18910fd764241945f1f136b5a22b4 | |
Diffstat (limited to 'megapixels/app/models/dataset.py')
| -rw-r--r-- | megapixels/app/models/dataset.py | 229 |
1 file changed, 229 insertions, 0 deletions
```diff
diff --git a/megapixels/app/models/dataset.py b/megapixels/app/models/dataset.py
new file mode 100644
index 00000000..eb0109a7
--- /dev/null
+++ b/megapixels/app/models/dataset.py
@@ -0,0 +1,229 @@
+"""
+Dataset model: container for all CSVs about a dataset
+"""
+import os
+import sys
+from os.path import join
+from pathlib import Path
+import logging
+
+import pandas as pd
+import numpy as np
+
+from app.settings import app_cfg as cfg
+from app.settings import types
+from app.models.bbox import BBox
+from app.utils import file_utils, im_utils
+from app.models.data_store import DataStore, DataStoreS3
+from app.utils.logger_utils import Logger
+
+# -------------------------------------------------------------------------
+# Dataset
+# -------------------------------------------------------------------------
+
+class Dataset:
+
+    def __init__(self, opt_data_store, opt_dataset_type):
+        self._dataset_type = opt_dataset_type  # enum type
+        self.log = Logger.getLogger()
+        self._metadata = {}
+        self._face_vectors = []
+        self._nullframe = pd.DataFrame()  # empty placeholder
+        self.data_store = DataStore(opt_data_store, self._dataset_type)
+        self.data_store_s3 = DataStoreS3(self._dataset_type)
+
+    def load_face_vectors(self):
+        metadata_type = types.Metadata.FACE_VECTOR
+        fp_csv = self.data_store.metadata(metadata_type)
+        self.log.info(f'loading: {fp_csv}')
+        if Path(fp_csv).is_file():
+            self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
+            # convert DataFrame rows to lists of floats
+            self._face_vectors = self.df_vecs_to_dict(self._metadata[metadata_type])
+            self._face_vector_roi_idxs = self.df_vec_roi_idxs_to_dict(self._metadata[metadata_type])
+            self.log.info(f'build face vector dict: {len(self._face_vectors)}')
+            # remove the face vector column; it can be several GB of memory
+            self._metadata[metadata_type].drop('vec', axis=1, inplace=True)
+        else:
+            self.log.error(f'File not found: {fp_csv}. Exiting.')
+            sys.exit()
+
+    def load_records(self):
+        metadata_type = types.Metadata.FILE_RECORD
+        fp_csv = self.data_store.metadata(metadata_type)
+        self.log.info(f'loading: {fp_csv}')
+        if Path(fp_csv).is_file():
+            self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
+        else:
+            self.log.error(f'File not found: {fp_csv}. Exiting.')
+            sys.exit()
+
+    def load_identities(self):
+        metadata_type = types.Metadata.IDENTITY
+        fp_csv = self.data_store.metadata(metadata_type)
+        self.log.info(f'loading: {fp_csv}')
+        if Path(fp_csv).is_file():
+            self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
+        else:
+            self.log.error(f'File not found: {fp_csv}. Exiting.')
+            sys.exit()
+
+    def metadata(self, opt_metadata_type):
+        return self._metadata.get(opt_metadata_type, None)
+
+    def index_to_record(self, index):
+        # get record meta
+        df_record = self._metadata[types.Metadata.FILE_RECORD]
+        ds_record = df_record.iloc[index]
+        identity_index = ds_record.identity_index
+        # get identity meta
+        df_identity = self._metadata[types.Metadata.IDENTITY]
+        # future datasets can have multiple identities per image
+        ds_identities = df_identity.iloc[identity_index]
+        # get filepath and S3 URL
+        fp_im = self.data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+        s3_url = self.data_store_s3.face(ds_record.uuid)
+        image_record = ImageRecord(ds_record, fp_im, s3_url, ds_identities=ds_identities)
+        return image_record
+
+    def vector_to_record(self, vector_index):
+        '''Accumulates an image and its metadata'''
+        df_face_vector = self._metadata[types.Metadata.FACE_VECTOR]
+        ds_face_vector = df_face_vector.iloc[vector_index]
+        # get the match's image index
+        image_index = ds_face_vector.image_index
+        # get the ROI DataFrame
+        df_face_roi = self._metadata[types.Metadata.FACE_ROI]
+        ds_roi = df_face_roi.iloc[image_index]
+        # create BBox
+        dim = (ds_roi.image_width, ds_roi.image_height)
+        bbox = BBox.from_xywh_dim(ds_roi.x, ds_roi.y, ds_roi.w, ds_roi.h, dim)
+        # use the image index to get the SHA256 (and later the identity index)
+        df_sha256 = self._metadata[types.Metadata.SHA256]
+        ds_sha256 = df_sha256.iloc[image_index]
+        sha256 = ds_sha256.sha256
+        # get the local filepath
+        df_filepath = self._metadata[types.Metadata.FILEPATH]
+        ds_file = df_filepath.iloc[image_index]
+        fp_im = self.data_store.face_image(ds_file.subdir, ds_file.fn, ds_file.ext)
+        # get remote paths
+        df_uuid = self._metadata[types.Metadata.UUID]
+        ds_uuid = df_uuid.iloc[image_index]
+        uuid = ds_uuid.uuid
+        fp_url = self.data_store_s3.face_image(uuid)
+        fp_url_crop = self.data_store_s3.face_image_crop(uuid)
+
+        # TODO: align with the ImageRecord(ds_record, fp, url, ...) signature
+        image_record = ImageRecord(image_index, sha256, uuid, bbox, fp_im, fp_url)
+        # now get the identity index (if available)
+        identity_index = ds_sha256.identity_index
+        if identity_index > -1:
+            # then use the identity index to get the identity meta
+            df_identity = self._metadata[types.Metadata.IDENTITY]
+            ds_identity = df_identity.iloc[identity_index]
+            # get the name and description
+            name = ds_identity.fullname
+            desc = ds_identity.description
+            gender = ds_identity.gender
+            n_images = ds_identity.images
+            url = '(url)'  # TODO
+            age = '(age)'  # TODO
+            nationality = '(nationality)'  # TODO
+            identity = Identity(identity_index, name=name, desc=desc, gender=gender,
+                                n_images=n_images, url=url, age=age, nationality=nationality)
+            image_record.identity = identity
+        else:
+            self.log.info(f'no identity index: {ds_sha256}')
+
+        return image_record
+
+    def find_matches(self, query_vec, n_results=5, threshold=0.6):
+        image_records = []  # list of image matches w/identity if available
+        # find the indices of the most similar feature vectors
+        #match_idxs = self.similar(query_vec, n_results, threshold)
+        sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
+        match_idxs = np.argpartition(sim_scores, n_results)[:n_results]
+
+        for match_idx in match_idxs:
+            # get the corresponding face vector row
+            roi_index = self._face_vector_roi_idxs[match_idx]
+            df_record = self._metadata[types.Metadata.FILE_RECORD]
+            ds_record = df_record.iloc[roi_index]
+            self.log.debug(f'find match index: {match_idx} --> roi_index: {roi_index}')
+            fp_im = self.data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
+            s3_url = self.data_store_s3.face(ds_record.uuid)
+            image_record = ImageRecord(ds_record, fp_im, s3_url)
+            #roi_index = self._face_vector_roi_idxs[match_idx]
+            #image_record = self.roi_idx_to_record(roi_index)
+            image_records.append(image_record)
+        return image_records
+
+    # ----------------------------------------------------------------------
+    # utilities
+
+    def df_vecs_to_dict(self, df):
+        # parse the comma-separated 'vec' CSV column into lists of floats
+        return [list(map(float, x.vec.split(','))) for x in df.itertuples()]
+
+    def df_vec_roi_idxs_to_dict(self, df):
+        # collect the roi_index column as a list
+        return [x.roi_index for x in df.itertuples()]
+
+    def similar(self, query_vec, n_results):
+        '''Finds most similar N indices of query face vector
+        :query_vec: (list) of 128 floating point numbers of face encoding
+        :n_results: (int) number of most similar indices to return
+        :returns (list) of (int) indices
+        '''
+        # uses np.linalg, based on the ageitgey/face_recognition code
+        sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
+        top_idxs = np.argpartition(sim_scores, n_results)[:n_results]
+        return top_idxs
+
+
+class ImageRecord:
+
+    def __init__(self, ds_record, fp, url, ds_rois=None, ds_identities=None):
+        # more metadata may go here later
+        self.image_index = ds_record.name  # row label from the 'index' column
+        self.sha256 = ds_record.sha256
+        self.uuid = ds_record.uuid
+        self.filepath = fp
+        self.url = url
+        self._identities = []
+        # image records contain ROIs
+        # ROIs are linked to identities
+        #self._identities = [Identity(x) for x in ds_identities]
+
+    @property
+    def identity(self):
+        return self._identities[0] if self._identities else None
+
+    @identity.setter
+    def identity(self, identity):
+        self._identities.append(identity)
+
+    def summarize(self):
+        '''Summarizes data for debugging'''
+        log = Logger.getLogger()
+        log.info(f'filepath: {self.filepath}')
+        log.info(f'sha256: {self.sha256}')
+        log.info(f'UUID: {self.uuid}')
+        log.info(f'S3 url: {self.url}')
+        for identity in self._identities:
+            log.info(f'name: {identity.name}')
+            log.info(f'description: {identity.description}')
+            log.info(f'gender: {identity.gender}')
+            log.info(f'images: {identity.n_images}')
+
+
+class Identity:
+
+    def __init__(self, idx, name='NA', desc='NA', gender='NA', n_images=1,
+                 url='NA', age='NA', nationality='NA'):
+        self.index = idx
+        self.name = name
+        self.description = desc
+        self.gender = gender
+        self.n_images = n_images
+        self.url = url
+        self.age = age
+        self.nationality = nationality
```
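
To make the retrieval step concrete: `find_matches` (and the `similar` helper it sketches) ranks the stored face encodings by L2 distance to the query and uses `np.argpartition` to pull out the top hits without a full sort. Below is a minimal, self-contained sketch of that technique; the 4-d vectors and the `gallery`/`query` names are illustrative stand-ins for the 128-d encodings the model stores.

```python
import numpy as np

# Gallery of stored face encodings (one row per face) and one query encoding.
# Real encodings are 128-d; 4-d vectors keep the example readable.
gallery = np.array([
    [0.1, 0.2, 0.3, 0.4],
    [0.9, 0.8, 0.7, 0.6],
    [0.1, 0.2, 0.3, 0.5],
])
query = np.array([0.1, 0.2, 0.3, 0.45])

n_results = 2
# L2 distance from the query to every gallery vector
dists = np.linalg.norm(gallery - query, axis=1)
# argpartition places the n_results smallest distances first without
# fully sorting the array
top_idxs = np.argpartition(dists, n_results)[:n_results]
# sort only the few returned hits by distance for presentation
top_idxs = top_idxs[np.argsort(dists[top_idxs])]
print(top_idxs, dists[top_idxs])  # -> indices 0 and 2, the closest rows
```

`argpartition` runs in O(n) rather than the O(n log n) of a full sort, which matters when the gallery holds millions of encodings; only the handful of returned hits need ordering afterwards.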
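For orientation, a hypothetical caller might look like the following. The data-store name `'local'` and the enum value `types.Dataset.EXAMPLE` are invented placeholders, since the diff does not show how `DataStore` backends or dataset-type enums are configured.

```python
# Hypothetical usage sketch; 'local' and types.Dataset.EXAMPLE are
# placeholders, not confirmed values from this codebase.
dataset = Dataset('local', types.Dataset.EXAMPLE)
dataset.load_records()       # FILE_RECORD metadata, used by find_matches
dataset.load_identities()    # IDENTITY metadata, used by index_to_record
dataset.load_face_vectors()  # FACE_VECTOR metadata + in-memory vectors

query_vec = [0.0] * 128      # stand-in for a real 128-d face encoding
for record in dataset.find_matches(query_vec, n_results=5):
    record.summarize()
```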
