Diffstat (limited to 'megapixels/app/models')
| -rw-r--r-- | megapixels/app/models/bbox.py    | 17 |
| -rw-r--r-- | megapixels/app/models/dataset.py | 25 |
2 files changed, 32 insertions, 10 deletions
diff --git a/megapixels/app/models/bbox.py b/megapixels/app/models/bbox.py
index f1216698..f65f7373 100644
--- a/megapixels/app/models/bbox.py
+++ b/megapixels/app/models/bbox.py
@@ -1,4 +1,5 @@
 import math
+import random
 
 from dlib import rectangle as dlib_rectangle
 import numpy as np
@@ -127,9 +128,23 @@ class BBox:
     d = int(math.sqrt(math.pow(dcx, 2) + math.pow(dcy, 2)))
     return d
 
+
   # -----------------------------------------------------------------
   # Modify
 
+  def jitter(self, amt):
+    '''Jitters BBox in x, y, w, h values. Used for face feature extraction
+    :param amt: (float) percentage of BBox for maximum translation
+    :returns (BBox)
+    '''
+    w = self._width + (self._width * random.uniform(-amt, amt))
+    h = self._height + (self._height * random.uniform(-amt, amt))
+    cx = self._cx + (self._cx * random.uniform(-amt, amt))
+    cy = self._cy + (self._cy * random.uniform(-amt, amt))
+    x1, y1 = np.clip((cx - w/2, cy - h/2), 0.0, 1.0)
+    x2, y2 = np.clip((cx + w/2, cy + h/2), 0.0, 1.0)
+    return BBox(x1, y1, x2, y2)
+
   def expand(self, per):
     """Expands BBox by percentage
     :param per: (float) percentage to expand 0.0 - 1.0
@@ -186,7 +201,7 @@ class BBox:
       # print(adj)
       r = np.add(np.array(r), adj)
 
-    return BBox(*r)
+    return BBox(*r)  # updates all BBox values
 
   # -----------------------------------------------------------------
 
diff --git a/megapixels/app/models/dataset.py b/megapixels/app/models/dataset.py
index eb0109a7..bbef9ff5 100644
--- a/megapixels/app/models/dataset.py
+++ b/megapixels/app/models/dataset.py
@@ -44,6 +44,9 @@ class Dataset:
       self.log.info(f'build face vector dict: {len(self._face_vectors)}')
       # remove the face vector column, it can be several GB of memory
       self._metadata[metadata_type].drop('vec', axis=1, inplace=True)
+      #n_dims = len(self._metadata[metadata_type].keys()) - 2
+      #drop_keys = [f'd{i}' for i in range(1,n_dims+1)]
+      #self._metadata[metadata_type].drop(drop_keys, axis=1, inplace=True)
     else:
       self.log.error(f'File not found: {fp_csv}. Exiting.')
       sys.exit()
@@ -53,7 +56,7 @@ class Dataset:
     fp_csv = self.data_store.metadata(metadata_type)
     self.log.info(f'loading: {fp_csv}')
     if Path(fp_csv).is_file():
-      self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
+      self._metadata[metadata_type] = pd.read_csv(fp_csv, dtype={'fn':str}).set_index('index')
     else:
       self.log.error(f'File not found: {fp_csv}. Exiting.')
       sys.exit()
@@ -142,33 +145,37 @@ class Dataset:
     # find most similar feature vectors indexes
     #match_idxs = self.similar(query_vec, n_results, threshold)
     sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
-    match_idxs = np.argpartition(sim_scores, n_results)[:n_results]
+    match_idxs = np.argpartition(sim_scores, range(n_results))[:n_results]
+    df_vector = self._metadata[types.Metadata.FACE_VECTOR]
+    df_record = self._metadata[types.Metadata.FILE_RECORD]
+
     for match_idx in match_idxs:
       # get the corresponding face vector row
       roi_index = self._face_vector_roi_idxs[match_idx]
-      df_record = self._metadata[types.Metadata.FILE_RECORD]
-      ds_record = df_record.iloc[roi_index]
+      record_idx = df_vector.iloc[roi_index].record_index
+      ds_record = df_record.iloc[record_idx]
       self.log.debug(f'find match index: {match_idx}, --> roi_index: {roi_index}')
       fp_im = self.data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
       s3_url = self.data_store_s3.face(ds_record.uuid)
       image_record = ImageRecord(ds_record, fp_im, s3_url)
-      #roi_index = self._face_vector_roi_idxs[match_idx]
-      #image_record = self.roi_idx_to_record(roi_index)
       image_records.append(image_record)
 
     return image_records
 
   # ----------------------------------------------------------------------
   # utilities
 
-  def df_vecs_to_dict(self, df):
+  def df_vecs_to_dict(self, df_vec):
     # convert the DataFrame CSV to float list of vecs
-    return [list(map(float,x.vec.split(','))) for x in df.itertuples()]
+    # n_dims = len(df_vec.keys()) - 2 # number of columns with 'd1, d2,...d256'
+    #return [[df[f'd{i}'] for i in range(1,n_dims+1)] for df_idx, df in df_vec.iterrows()]
+    # return [[df[f'd{i}'] for i in range(1,n_dims+1)] for df_idx, df in df_vec.iterrows()]
+    return [list(map(float, x.vec.split(','))) for x in df_vec.itertuples()]
 
   def df_vec_roi_idxs_to_dict(self, df):
     # convert the DataFrame CSV to float list of vecs
     #return [x.roi_index for x in df.itertuples()]
-    return [x.roi_index for x in df.itertuples()]
+    return [int(x.roi_index) for i,x in df.iterrows()]
 
   def similar(self, query_vec, n_results):
     '''Finds most similar N indices of query face vector
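Note on the new BBox.jitter(amt): it randomly scales and translates a normalized bounding box by up to amt percent and clips the result to the 0.0-1.0 range. Below is a minimal usage sketch, assuming BBox is constructed from normalized (x1, y1, x2, y2) corners as the diff suggests; the import path and sample coordinates are illustrative, not taken from the repository.

# Hypothetical sketch (not part of the commit): generate a few jittered
# crops of one detected face box, e.g. to augment face feature extraction.
from app.models.bbox import BBox  # module path assumed from megapixels/app/models/bbox.py

bbox = BBox(0.25, 0.30, 0.55, 0.70)                # normalized x1, y1, x2, y2
jittered = [bbox.jitter(0.05) for _ in range(5)]   # each up to +/-5% change in size and center
# each call returns a new BBox; coordinates are clipped to [0.0, 1.0] via np.clip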
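One behavioral note on the dataset.py search change: passing range(n_results) as the kth argument to np.argpartition puts each of the first n_results positions into its sorted place, so the matched indices come back ordered by distance rather than as an unordered top-N. A small standalone illustration (the array values are made up):

import numpy as np

scores = np.array([0.9, 0.2, 0.7, 0.1, 0.5])
top_any_order = np.argpartition(scores, 3)[:3]      # three smallest, arbitrary order
top_sorted = np.argpartition(scores, range(3))[:3]  # three smallest, ascending by score
print(top_sorted)  # -> [3 1 4], i.e. scores 0.1, 0.2, 0.5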
