""" Dataset model: container for all CSVs about a dataset """ import os import sys from os.path import join from pathlib import Path import logging import pandas as pd import numpy as np from app.settings import app_cfg as cfg from app.settings import types from app.models.bbox import BBox from app.utils import file_utils, im_utils from app.models.data_store import DataStore, DataStoreS3 from app.utils.logger_utils import Logger # ------------------------------------------------------------------------- # Dataset # ------------------------------------------------------------------------- class Dataset: def __init__(self, opt_data_store, opt_dataset_type): self._dataset_type = opt_dataset_type # enum type self.log = Logger.getLogger() self._metadata = {} self._face_vectors = [] self._nullframe = pd.DataFrame() # empty placeholder self.data_store = DataStore(opt_data_store, self._dataset_type) self.data_store_s3 = DataStoreS3(self._dataset_type) def _load_face_vectors(self): metadata_type = types.Metadata.FACE_VECTOR fp_csv = self.data_store.metadata(metadata_type) self.log.info(f'loading: {fp_csv}') if Path(fp_csv).is_file(): self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index') # convert DataFrame to list of floats self._face_vectors = self.df_vecs_to_dict(self._metadata[metadata_type]) self._face_vector_roi_idxs = self.df_vec_roi_idxs_to_dict(self._metadata[metadata_type]) self.log.info(f'build face vector dict: {len(self._face_vectors)}') # remove the face vector column, it can be several GB of memory self._metadata[metadata_type].drop('vec', axis=1, inplace=True) #n_dims = len(self._metadata[metadata_type].keys()) - 2 #drop_keys = [f'd{i}' for i in range(1,n_dims+1)] #self._metadata[metadata_type].drop(drop_keys, axis=1, inplace=True) else: self.log.error(f'File not found: {fp_csv}. Exiting.') sys.exit() def _load_file_records(self): metadata_type = types.Metadata.FILE_RECORD fp_csv = self.data_store.metadata(metadata_type) self.log.info(f'loading: {fp_csv}') if Path(fp_csv).is_file(): self._metadata[metadata_type] = pd.read_csv(fp_csv, dtype=cfg.FILE_RECORD_DTYPES).set_index('index') else: self.log.error(f'File not found: {fp_csv}. Exiting.') sys.exit() def _load_metadata(self, metadata_type): fp_csv = self.data_store.metadata(metadata_type) self.log.info(f'loading: {fp_csv}') if Path(fp_csv).is_file(): self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index') else: self.log.error(f'File not found: {fp_csv}. 
    def load_metadata(self, metadata_type):
        if metadata_type == types.Metadata.FILE_RECORD:
            self._load_file_records()
        elif metadata_type == types.Metadata.FACE_VECTOR:
            self._load_face_vectors()
        else:
            self._load_metadata(metadata_type)

    def metadata(self, opt_metadata_type):
        return self._metadata.get(opt_metadata_type, None)

    def index_to_record(self, index):
        # get record meta
        df_record = self._metadata[types.Metadata.FILE_RECORD]
        ds_record = df_record.iloc[index]
        identity_index = ds_record.identity_index
        # get identity meta
        df_identity = self._metadata[types.Metadata.IDENTITY]
        # future datasets can have multiple identities per image
        #ds_identities = df_identity.iloc[identity_index]
        # get filepath and S3 URL
        fp_im = self.data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
        s3_url = self.data_store_s3.face(ds_record.uuid)
        image_record = ImageRecord(ds_record, fp_im, s3_url)
        return image_record

    def vector_to_record(self, vector_index):
        '''Accumulates an image and its metadata'''
        df_face_vector = self._metadata[types.Metadata.FACE_VECTOR]
        ds_face_vector = df_face_vector.iloc[vector_index]
        # get the match's image index
        image_index = ds_face_vector.image_index
        # get the ROI dataframe
        df_face_roi = self._metadata[types.Metadata.FACE_ROI]
        ds_roi = df_face_roi.iloc[image_index]
        # create BBox
        dim = (ds_roi.image_width, ds_roi.image_height)
        bbox = BBox.from_xywh_dim(ds_roi.x, ds_roi.y, ds_roi.w, ds_roi.h, dim)
        # get the SHA256 row, which also carries the identity index
        df_sha256 = self._metadata[types.Metadata.SHA256]
        ds_sha256 = df_sha256.iloc[image_index]
        sha256 = ds_sha256.sha256
        # get the local filepath
        df_filepath = self._metadata[types.Metadata.FILEPATH]
        ds_file = df_filepath.iloc[image_index]
        fp_im = self.data_store.face_image(ds_file.subdir, ds_file.fn, ds_file.ext)
        # get the remote paths
        df_uuid = self._metadata[types.Metadata.UUID]
        ds_uuid = df_uuid.iloc[image_index]
        uuid = ds_uuid.uuid
        fp_url = self.data_store_s3.face_image(uuid)
        fp_url_crop = self.data_store_s3.face_image_crop(uuid)
        # assemble a record-like Series so ImageRecord can read the same
        # attributes it reads from FILE_RECORD rows
        ds_record = pd.Series({'sha256': sha256, 'uuid': uuid,
                               'width': ds_roi.image_width,
                               'height': ds_roi.image_height},
                              name=image_index)
        image_record = ImageRecord(ds_record, fp_im, fp_url, bbox)
        # now get the identity index (if available)
        identity_index = ds_sha256.identity_index
        if identity_index > -1:
            # use the identity index to get the identity meta
            df_identity = self._metadata[types.Metadata.IDENTITY]
            ds_identity = df_identity.iloc[identity_index]
            # map the identity row onto the Identity model
            # (TODO: url, age, and nationality are not yet part of the model)
            identity = Identity(identity_index,
                                name_display=ds_identity.fullname,
                                description=ds_identity.description,
                                gender=ds_identity.gender,
                                num_images=ds_identity.images)
            image_record.identity = identity
        else:
            self.log.info(f'no identity index: {ds_sha256}')
        return image_record

    def find_matches(self, query_vec, n_results=5, threshold=0.6):
        # TODO: threshold is currently unused
        image_records = []  # list of image matches w/identity if available
        # find the indexes of the most similar face vectors
        # (Euclidean distance, as in ageitgey/face_recognition)
        sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
        match_idxs = np.argpartition(sim_scores, range(n_results))[:n_results]
        df_record = self._metadata[types.Metadata.FILE_RECORD]
        df_roi = self._metadata[types.Metadata.FACE_ROI]
        if types.Metadata.IDENTITY in self._metadata.keys():
            df_identity = self._metadata[types.Metadata.IDENTITY]
        else:
            df_identity = None
        for match_idx in match_idxs:
            # map the matched vector to its ROI row, then to its file record
            roi_index = self._face_vector_roi_idxs[match_idx]
            ds_roi = df_roi.iloc[roi_index]
            record_idx = int(ds_roi.record_index)
            ds_record = df_record.iloc[record_idx]
            self.log.debug(f'find match index: {match_idx} --> roi_index: {roi_index}')
            fp_im = self.data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
            s3_url = self.data_store_s3.face(ds_record.uuid)
            bbox_norm = BBox.from_xywh_norm(ds_roi.x, ds_roi.y, ds_roi.w, ds_roi.h)
            if df_identity is not None:
                ds_id = df_identity.loc[df_identity['identity_key'] == ds_record.identity_key].iloc[0]
                identity = Identity(record_idx,
                                    name_display=ds_id.name_display,
                                    description=ds_id.description,
                                    gender=ds_id.gender,
                                    roi_index=roi_index,
                                    identity_key=ds_id.identity_key,
                                    num_images=ds_id.num_images)
            else:
                identity = None
            image_record = ImageRecord(ds_record, fp_im, s3_url, bbox_norm, identity=identity)
            image_records.append(image_record)
        return image_records
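
    # Minimal usage sketch (hypothetical store path and enum member names;
    # the real values live in app.settings.types and the deployment config):
    #
    #   dataset = Dataset('/path/to/data_store', types.Dataset.EXAMPLE)
    #   dataset.load_metadata(types.Metadata.FILE_RECORD)
    #   dataset.load_metadata(types.Metadata.FACE_ROI)
    #   dataset.load_metadata(types.Metadata.FACE_VECTOR)
    #   matches = dataset.find_matches(query_vec, n_results=5)
    #   for image_record in matches:
    #       image_record.summarize()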
    # ----------------------------------------------------------------------
    # utilities

    def df_vecs_to_dict(self, df_vec):
        # parse the 'vec' column ("0.1,-0.2,...") into a list of float lists
        return [list(map(float, x.vec.split(','))) for x in df_vec.itertuples()]

    def df_vec_roi_idxs_to_dict(self, df):
        # map each face vector row to its ROI index
        return [int(x.roi_index) for x in df.itertuples()]

    def similar(self, query_vec, n_results):
        '''Finds most similar N indices of query face vector
        :query_vec: (list) of 128 floating point numbers of face encoding
        :n_results: (int) number of most similar indices to return
        :returns: (list) of (int) indices
        '''
        # uses np.linalg, based on the ageitgey/face_recognition code
        dists = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
        top_idxs = np.argpartition(dists, range(n_results))[:n_results]
        return [int(i) for i in top_idxs]


class ImageRecord:

    def __init__(self, ds_record, fp, url, bbox_norm=None, identity=None):
        # more metadata may be added here later
        self.image_index = ds_record.name  # row label (the 'index' column)
        self.sha256 = ds_record.sha256
        self.uuid = ds_record.uuid
        self.filepath = fp
        self.width = ds_record.width
        self.height = ds_record.height
        self.url = url
        self.bbox = bbox_norm
        self.identity = identity
        # image records contain ROIs
        # ROIs are linked to identities

    def summarize(self):
        '''Summarizes data for debugging'''
        log = Logger.getLogger()
        log.info(f'filepath: {self.filepath}')
        log.info(f'sha256: {self.sha256}')
        log.info(f'UUID: {self.uuid}')
        log.info(f'S3 url: {self.url}')
        if self.identity:
            log.info(f'name: {self.identity.name_display}')
            log.info(f'description: {self.identity.description}')
            log.info(f'gender: {self.identity.gender}')
            log.info(f'images: {self.identity.num_images}')


class Identity:

    def __init__(self, idx, identity_key=None, name_display=None, num_images=None,
                 description=None, gender=None, roi_index=None):
        self.index = idx
        self.identity_key = identity_key
        self.name_display = name_display
        self.description = description
        self.gender = gender
        self.roi_index = roi_index
        self.num_images = num_images
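

# A minimal, self-contained sketch of the matching math used in
# Dataset.find_matches and Dataset.similar (Euclidean distance plus
# argpartition), runnable without the app package. The vectors are toy
# data; the 128-dim size follows the docstring of similar().
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    face_vectors = rng.normal(size=(1000, 128))  # stand-in for self._face_vectors
    query_vec = face_vectors[42] + rng.normal(scale=0.01, size=128)  # near-duplicate of row 42
    dists = np.linalg.norm(face_vectors - query_vec, axis=1)
    n_results = 5
    # passing a range as kth makes argpartition sort the first n_results slots
    top_idxs = np.argpartition(dists, range(n_results))[:n_results]
    print(top_idxs)  # index 42 should rank first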