summaryrefslogtreecommitdiff
path: root/megapixels/app/models
diff options
context:
space:
mode:
Diffstat (limited to 'megapixels/app/models')
-rw-r--r--megapixels/app/models/data_store.py3
-rw-r--r--megapixels/app/models/dataset.py67
2 files changed, 48 insertions, 22 deletions
diff --git a/megapixels/app/models/data_store.py b/megapixels/app/models/data_store.py
index 626c9da4..a8d6916f 100644
--- a/megapixels/app/models/data_store.py
+++ b/megapixels/app/models/data_store.py
@@ -24,6 +24,9 @@ class DataStore:
def metadata_dir(self):
return join(self.dir_metadata)
+ def media_dir(self):
+ return join(self.dir_media)
+
def media_images_original(self):
return join(self.dir_media, 'original')
diff --git a/megapixels/app/models/dataset.py b/megapixels/app/models/dataset.py
index bbef9ff5..1b91467b 100644
--- a/megapixels/app/models/dataset.py
+++ b/megapixels/app/models/dataset.py
@@ -32,7 +32,7 @@ class Dataset:
self.data_store = DataStore(opt_data_store, self._dataset_type)
self.data_store_s3 = DataStoreS3(self._dataset_type)
- def load_face_vectors(self):
+ def _load_face_vectors(self):
metadata_type = types.Metadata.FACE_VECTOR
fp_csv = self.data_store.metadata(metadata_type)
self.log.info(f'loading: {fp_csv}')
@@ -51,18 +51,17 @@ class Dataset:
self.log.error(f'File not found: {fp_csv}. Exiting.')
sys.exit()
- def load_records(self):
+ def _load_file_records(self):
metadata_type = types.Metadata.FILE_RECORD
fp_csv = self.data_store.metadata(metadata_type)
self.log.info(f'loading: {fp_csv}')
if Path(fp_csv).is_file():
- self._metadata[metadata_type] = pd.read_csv(fp_csv, dtype={'fn':str}).set_index('index')
+ self._metadata[metadata_type] = pd.read_csv(fp_csv, dtype=cfg.FILE_RECORD_DTYPES).set_index('index')
else:
self.log.error(f'File not found: {fp_csv}. Exiting.')
sys.exit()
- def load_identities(self):
- metadata_type = types.Metadata.IDENTITY
+ def _load_metadata(self, metadata_type):
fp_csv = self.data_store.metadata(metadata_type)
self.log.info(f'loading: {fp_csv}')
if Path(fp_csv).is_file():
@@ -70,6 +69,14 @@ class Dataset:
else:
self.log.error(f'File not found: {fp_csv}. Exiting.')
sys.exit()
+
+ def load_metadata(self, metadata_type):
+ if metadata_type == types.Metadata.FILE_RECORD:
+ self._load_file_records()
+ elif metadata_type == types.Metadata.FACE_VECTOR:
+ self._load_face_vectors()
+ else:
+ self._load_metadata(metadata_type)
def metadata(self, opt_metadata_type):
return self._metadata.get(opt_metadata_type, None)
@@ -82,11 +89,11 @@ class Dataset:
# get identity meta
df_identity = self._metadata[types.Metadata.IDENTITY]
# future datasets can have multiple identities per image
- ds_identities = df_identity.iloc[identity_index]
+ #ds_identities = df_identity.iloc[identity_index]
# get filepath and S3 url
fp_im = self.data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
s3_url = self.data_store_s3.face(ds_record.uuid)
- image_record = ImageRecord(ds_record, fp_im, s3_url, ds_identities=ds_identities)
+ image_record = ImageRecord(ds_record, fp_im, s3_url)
return image_record
def vector_to_record(self, record_index):
@@ -149,7 +156,14 @@ class Dataset:
df_vector = self._metadata[types.Metadata.FACE_VECTOR]
df_record = self._metadata[types.Metadata.FILE_RECORD]
-
+ if types.Metadata.IDENTITY in self._metadata.keys():
+ df_identity = self._metadata[types.Metadata.IDENTITY]
+ else:
+ df_identity = None
+ df_roi = self._metadata[types.Metadata.FACE_ROI]
+
+ identities = []
+
for match_idx in match_idxs:
# get the corresponding face vector row
roi_index = self._face_vector_roi_idxs[match_idx]
@@ -158,7 +172,18 @@ class Dataset:
self.log.debug(f'find match index: {match_idx}, --> roi_index: {roi_index}')
fp_im = self.data_store.face(ds_record.subdir, ds_record.fn, ds_record.ext)
s3_url = self.data_store_s3.face(ds_record.uuid)
- image_record = ImageRecord(ds_record, fp_im, s3_url)
+ identities = []
+ if types.Metadata.IDENTITY in self._metadata.keys():
+ ds_id = df_identity.loc[df_identity['identity_key'] == ik].iloc[0]
+ identity = Identity(idx,
+ name_display=ds_id.name_display,
+ name_full=ds_id.name_full,
+ description=ds_id.description,
+ gender=ds_id.gender,
+ image_index=ds_id.image_index,
+ identity_key=ds_id.identity_key)
+ identities.append(identity)
+ image_record = ImageRecord(ds_record, fp_im, s3_url, identities=identities)
image_records.append(image_record)
return image_records
@@ -191,19 +216,20 @@ class Dataset:
class ImageRecord:
- def __init__(self, ds_record, fp, url, ds_rois=None, ds_identities=None):
+ def __init__(self, ds_record, fp, url, rois=None, identities=None):
# maybe more metadata will go here
self.image_index = ds_record.index
self.sha256 = ds_record.sha256
self.uuid = ds_record.uuid
self.filepath = fp
+ self.width = ds_record.width
+ self.height = ds_record.height
self.url = url
- self._identities = []
+ self.rois = rois
+ self.identities = identities
# image records contain ROIs
# ROIs are linked to identities
- #self._identities = [Identity(x) for x in ds_identities]
-
@property
def identity(self, index):
return self._identity
@@ -215,7 +241,7 @@ class ImageRecord:
log.info(f'sha256: {self.sha256}')
log.info(f'UUID: {self.uuid}')
log.info(f'S3 url: {self.url}')
- for identity in self._identities:
+ for identity in self.identities:
log.info(f'fullname: {identity.fullname}')
log.info(f'description: {identity.description}')
log.info(f'gender: {identity.gender}')
@@ -224,13 +250,10 @@ class ImageRecord:
class Identity:
- def __init__(self, idx, name='NA', desc='NA', gender='NA', n_images=1,
- url='NA', age='NA', nationality='NA'):
+ def __init__(self, idx, name_display=None, name_full=None, description=None, gender=None, roi=None):
self.index = idx
- self.name = name
- self.description = desc
+ self.name_display = name_display
+ self.name_full = name_full
+ self.description = description
self.gender = gender
- self.n_images = n_images
- self.url = url
- self.age = age
- self.nationality = nationality
+ self.roi = roi