path: root/megapixels/app
author    Jules Laplace <julescarbon@gmail.com>    2018-12-16 01:14:40 +0100
committer Jules Laplace <julescarbon@gmail.com>    2018-12-16 01:14:40 +0100
commit    f9616b08ce0fa8ab5d60b544b5c0ad1212f201b8 (patch)
tree      df2036f710122c9f4979785c643b76622b7c5989 /megapixels/app
parent    63335120a5800142ebe827bd10a1a0106c24b8d8 (diff)
parent    10f467b64e3be528ac246d5cf664d675aca3e7f3 (diff)
Merge branch 'master' of github.com:adamhrv/megapixels_dev
Diffstat (limited to 'megapixels/app')
-rw-r--r--  megapixels/app/models/dataset.py              179
-rw-r--r--  megapixels/app/processors/face_age.py          28
-rw-r--r--  megapixels/app/processors/face_beauty.py       27
-rw-r--r--  megapixels/app/processors/face_emotion.py      28
-rw-r--r--  megapixels/app/processors/face_gender.py       27
-rw-r--r--  megapixels/app/processors/face_landmarks_3d.py 27
-rw-r--r--  megapixels/app/processors/face_mesh.py         27
-rw-r--r--  megapixels/app/settings/app_cfg.py             16
-rw-r--r--  megapixels/app/settings/paths.py               163
-rw-r--r--  megapixels/app/settings/types.py               44
-rw-r--r--  megapixels/app/utils/path_utils.py             52
11 files changed, 445 insertions, 173 deletions
diff --git a/megapixels/app/models/dataset.py b/megapixels/app/models/dataset.py
new file mode 100644
index 00000000..11d568a5
--- /dev/null
+++ b/megapixels/app/models/dataset.py
@@ -0,0 +1,179 @@
+"""
+Dataset model: container for all CSVs about a dataset
+"""
+import os
+from os.path import join
+from pathlib import Path
+import logging
+
+import pandas as pd
+import numpy as np
+
+from app.settings import app_cfg as cfg
+from app.settings import types
+from app.models.bbox import BBox
+from app.utils import file_utils, im_utils, path_utils
+from app.utils.logger_utils import Logger
+
+# -------------------------------------------------------------------------
+# Dataset
+# -------------------------------------------------------------------------
+
+class Dataset:
+
+ def __init__(self, opt_dataset_type, opt_data_store=types.DataStore.NAS):
+ self._dataset_type = opt_dataset_type # enum type
+ self.log = Logger.getLogger()
+ self._metadata = {}
+ self._face_vectors = []
+ self._nullframe = pd.DataFrame() # empty placeholder
+ self.data_store = path_utils.DataStore(opt_data_store, self._dataset_type)
+ self.data_store_s3 = path_utils.DataStoreS3(self._dataset_type)
+
+ def load(self, opt_data_store):
+ '''Loads all CSV files into (dict) of DataFrames'''
+ for metadata_type in types.Metadata:
+ self.log.info(f'load metadata: {metadata_type}')
+ fp_csv = self.data_store.metadata(metadata_type)
+ self.log.info(f'loading: {fp_csv}')
+ if Path(fp_csv).is_file():
+ self._metadata[metadata_type] = pd.read_csv(fp_csv).set_index('index')
+ if metadata_type == types.Metadata.FACE_VECTOR:
+ # convert DataFrame to list of floats
+ self._face_vectors = self.df_to_vec_list(self._metadata[metadata_type])
+ self._metadata[metadata_type].drop('vec', axis=1, inplace=True)
+ else:
+ self.log.error(f'File not found: {fp_csv}. Replacing with empty DataFrame')
+ self._metadata[metadata_type] = self._nullframe
+ self.log.info('finished loading')
+
+ def metadata(self, opt_metadata_type):
+ return self._metadata.get(opt_metadata_type, self._nullframe)
+
+ def roi_idx_to_record(self, vector_index):
+ '''Accumulates image and its metadata'''
+ df_face_vector = self._metadata[types.Metadata.FACE_VECTOR]
+ ds_face_vector = df_face_vector.iloc[vector_index]
+ # get the match's image index
+ image_index = ds_face_vector.image_index
+ # get the roi dataframe
+ df_face_roi = self._metadata[types.Metadata.FACE_ROI]
+ ds_roi = df_face_roi.iloc[image_index]
+ # create BBox
+ dim = (ds_roi.image_width, ds_roi.image_height)
+ bbox = BBox.from_xywh_dim(ds_roi.x, ds_roi.y, ds_roi.w, ds_roi.h, dim)
+ # use the image index to get the sha256 from the SHA256 DataFrame
+ df_sha256 = self._metadata[types.Metadata.SHA256]
+ ds_sha256 = df_sha256.iloc[image_index]
+ sha256 = ds_sha256.sha256
+ # get the local filepath
+ df_filepath = self._metadata[types.Metadata.FILEPATH]
+ ds_file = df_filepath.iloc[image_index]
+ fp_im = self.data_store.face_image(ds_file.subdir, ds_file.fn, ds_file.ext)
+ # get remote path
+ df_uuid = self._metadata[types.Metadata.UUID]
+ ds_uuid = df_uuid.iloc[image_index]
+ uuid = ds_uuid.uuid
+ fp_url = self.data_store_s3.face_image(uuid)
+ fp_url_crop = self.data_store_s3.face_image_crop(uuid)
+
+ image_record = ImageRecord(image_index, sha256, uuid, bbox, fp_im, fp_url)
+ # now get the identity index (if available)
+ identity_index = ds_sha256.identity_index
+ if identity_index and not pd.isna(identity_index):  # guard against missing (NaN) identity index
+ # then use the identity index to get the identity meta
+ df_identity = self._metadata[types.Metadata.IDENTITY]
+ ds_identity = df_identity.iloc[identity_index]
+ # get the name and description
+ name = ds_identity.fullname
+ desc = ds_identity.description
+ gender = ds_identity.gender
+ n_images = ds_identity.images
+ url = '(url)' # TODO
+ age = '(age)' # TODO
+ nationality = '(nationality)'
+ identity = Identity(identity_index, name=name, desc=desc, gender=gender, n_images=n_images,
+ url=url, age=age, nationality=nationality)
+ image_record.identity = identity
+
+ return image_record
+
+
+ def matches(self, query_vec, n_results=5, threshold=0.5):
+ image_records = [] # list of image matches w/identity if available
+ # find most similar feature vectors indexes
+ match_idxs = self.similar(query_vec, n_results)  # note: threshold is not applied yet
+ for match_idx in match_idxs:
+ # get the corresponding face vector row
+ image_record = self.roi_idx_to_record(match_idx)
+ image_records.append(image_record)
+ return image_records
+
+ # ----------------------------------------------------------------------
+ # utilities
+
+ def df_to_vec_list(self, df):
+ # convert the DataFrame CSV to float list of vecs
+ vecs = [list(map(float,x.vec.split(','))) for x in df.itertuples()]
+ return vecs
+
+ def similar(self, query_vec, n_results):
+ '''Finds most similar N indices of query face vector
+ :query_vec: (list) of 128 floating point numbers of face encoding
+ :n_results: (int) number of most similar indices to return
+ :returns (list) of (int) indices
+ '''
+ # uses np.linalg based on the ageitgey/face_recognition code
+ vecs_sim_scores = np.linalg.norm(np.array([query_vec]) - np.array(self._face_vectors), axis=1)
+ top_idxs = np.argpartition(vecs_sim_scores, n_results)[:n_results]
+ return top_idxs
+
+
+
+class ImageRecord:
+
+ def __init__(self, image_index, sha256, uuid, bbox, filepath, url):
+ self.image_index = image_index
+ self.sha256 = sha256
+ self.uuid = uuid
+ self.bbox = bbox
+ self.filepath = filepath
+ self.url = url
+ self._identity = None
+
+ @property
+ def identity(self):
+ return self._identity
+
+ @identity.setter
+ def identity(self, value):
+ self._identity = value
+
+ def summarize(self):
+ '''Summarizes data for debugging'''
+ log = Logger.getLogger()
+ log.info(f'filepath: {self.filepath}')
+ log.info(f'sha256: {self.sha256}')
+ log.info(f'UUID: {self.uuid}')
+ log.info(f'BBox: {self.bbox}')
+ log.info(f's3 url: {self.url}')
+ if self._identity:
+ log.info(f'name: {self._identity.name}')
+ log.info(f'age: {self._identity.age}')
+ log.info(f'gender: {self._identity.gender}')
+ log.info(f'nationality: {self._identity.nationality}')
+ log.info(f'images: {self._identity.n_images}')
+
+
+class Identity:
+
+ def __init__(self, idx, name='NA', desc='NA', gender='NA', n_images=1,
+ url='NA', age='NA', nationality='NA'):
+ self.index = idx
+ self.name = name
+ self.description = desc
+ self.gender = gender
+ self.n_images = n_images
+ self.url = url
+ self.age = age
+ self.nationality = nationality
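
For context, a minimal usage sketch of the Dataset API added above (not part of the commit; types.Dataset.LFW and types.DataStore.NAS come from types.py in this same diff, and query_vec is a placeholder standing in for a real 128-float face encoding):

from app.models.dataset import Dataset
from app.settings import types

dataset = Dataset(types.Dataset.LFW, opt_data_store=types.DataStore.NAS)
dataset.load(types.DataStore.NAS)   # reads each metadata CSV into a DataFrame

query_vec = [0.0] * 128             # placeholder 128-d face encoding
for record in dataset.matches(query_vec, n_results=5):
    record.summarize()              # logs filepath, sha256, uuid, bbox and identity (if any)
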
diff --git a/megapixels/app/processors/face_age.py b/megapixels/app/processors/face_age.py
new file mode 100644
index 00000000..222858a5
--- /dev/null
+++ b/megapixels/app/processors/face_age.py
@@ -0,0 +1,28 @@
+import os
+from os.path import join
+from pathlib import Path
+import math
+
+import cv2 as cv
+import numpy as np
+import imutils
+
+from app.utils import im_utils, logger_utils
+from app.models.bbox import BBox
+from app.settings import app_cfg as cfg
+from app.settings import types
+
+
+
+class FaceAge:
+
+ # Estimates face age
+
+ def __init__(self):
+ self.log = logger_utils.Logger.getLogger()
+ pass
+
+
+ def age(self):
+ # use enum typed ages
+ return {'age': types.FaceAge.ADULT, 'confidence': 0.5} \ No newline at end of file
diff --git a/megapixels/app/processors/face_beauty.py b/megapixels/app/processors/face_beauty.py
new file mode 100644
index 00000000..a1ddd9f8
--- /dev/null
+++ b/megapixels/app/processors/face_beauty.py
@@ -0,0 +1,27 @@
+import os
+from os.path import join
+from pathlib import Path
+import math
+
+import cv2 as cv
+import numpy as np
+import imutils
+
+from app.utils import im_utils, logger_utils
+from app.models.bbox import BBox
+from app.settings import app_cfg as cfg
+from app.settings import types
+
+
+
+class FaceBeauty:
+
+ # Estimates beauty using CNN
+
+ def __init__(self):
+ self.log = logger_utils.Logger.getLogger()
+ pass
+
+
+ def beauty(self):
+ return 0.5 \ No newline at end of file
diff --git a/megapixels/app/processors/face_emotion.py b/megapixels/app/processors/face_emotion.py
new file mode 100644
index 00000000..c45da9ba
--- /dev/null
+++ b/megapixels/app/processors/face_emotion.py
@@ -0,0 +1,28 @@
+import os
+from os.path import join
+from pathlib import Path
+import math
+
+import cv2 as cv
+import numpy as np
+import imutils
+
+from app.utils import im_utils, logger_utils
+from app.models.bbox import BBox
+from app.settings import app_cfg as cfg
+from app.settings import types
+
+
+
+class FaceEmotion:
+
+ # Estimates face emotion
+
+ def __init__(self):
+ self.log = logger_utils.Logger.getLogger()
+ pass
+
+
+ def emotion(self):
+ # use enum typed emotions
+ return {'emotion': types.FaceEmotion.NEUTRAL, 'confidence': 0.5} \ No newline at end of file
diff --git a/megapixels/app/processors/face_gender.py b/megapixels/app/processors/face_gender.py
new file mode 100644
index 00000000..ee152098
--- /dev/null
+++ b/megapixels/app/processors/face_gender.py
@@ -0,0 +1,27 @@
+import os
+from os.path import join
+from pathlib import Path
+import math
+
+import cv2 as cv
+import numpy as np
+import imutils
+
+from app.utils import im_utils, logger_utils
+from app.models.bbox import BBox
+from app.settings import app_cfg as cfg
+from app.settings import types
+
+
+
+class FaceGender:
+
+ # Estimates gender using CNN
+
+ def __init__(self):
+ self.log = logger_utils.Logger.getLogger()
+ pass
+
+
+ def gender(self):
+ return 'm' \ No newline at end of file
diff --git a/megapixels/app/processors/face_landmarks_3d.py b/megapixels/app/processors/face_landmarks_3d.py
new file mode 100644
index 00000000..84a423b0
--- /dev/null
+++ b/megapixels/app/processors/face_landmarks_3d.py
@@ -0,0 +1,27 @@
+import os
+from os.path import join
+from pathlib import Path
+import math
+
+import cv2 as cv
+import numpy as np
+import imutils
+
+from app.utils import im_utils, logger_utils
+from app.models.bbox import BBox
+from app.settings import app_cfg as cfg
+from app.settings import types
+
+
+
+class FaceLandmarks3D:
+
+ # Estimates 3D facial landmarks
+
+ def __init__(self):
+ self.log = logger_utils.Logger.getLogger()
+ pass
+
+
+ def landmarks(self):
+ return [1,2,3,4,100] \ No newline at end of file
diff --git a/megapixels/app/processors/face_mesh.py b/megapixels/app/processors/face_mesh.py
new file mode 100644
index 00000000..2d3deb4f
--- /dev/null
+++ b/megapixels/app/processors/face_mesh.py
@@ -0,0 +1,27 @@
+import os
+from os.path import join
+from pathlib import Path
+import math
+
+import cv2 as cv
+import numpy as np
+import imutils
+
+from app.utils import im_utils, logger_utils
+from app.models.bbox import BBox
+from app.settings import app_cfg as cfg
+from app.settings import types
+
+
+
+class FaceMesh3D:
+
+ # Estimates 3D face mesh
+
+ def __init__(self):
+ self.log = logger_utils.Logger.getLogger()
+ pass
+
+
+ def mesh(self):
+ return [1,2,3,4,100] \ No newline at end of file
diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py
index d25936e6..50eaf576 100644
--- a/megapixels/app/settings/app_cfg.py
+++ b/megapixels/app/settings/app_cfg.py
@@ -4,15 +4,9 @@ import logging
import collections
from dotenv import load_dotenv
-import cv2 as cv
-
from app.settings import types
from app.utils import click_utils
-# -----------------------------------------------------------------------------
-# Metadata type names
-# -----------------------------------------------------------------------------
-
# -----------------------------------------------------------------------------
# Enun lists used for custom Click Params
@@ -23,6 +17,7 @@ HaarCascadeVar = click_utils.ParamVar(types.HaarCascade)
LogLevelVar = click_utils.ParamVar(types.LogLevel)
MetadataVar = click_utils.ParamVar(types.Metadata)
DatasetVar = click_utils.ParamVar(types.Dataset)
+DataStoreVar = click_utils.ParamVar(types.DataStore)
# # data_store
DATA_STORE = '/data_store_hdd/'
@@ -34,6 +29,7 @@ DIR_DATSET_NAS = join(DIR_DATASETS, 'people')
DIR_APPS = join(DATA_STORE,'apps')
DIR_APP = join(DIR_APPS,'megapixels')
DIR_MODELS = join(DIR_APP,'models')
+DIR_PEOPLE = 'people'
# # Frameworks
DIR_MODELS_CAFFE = join(DIR_MODELS,'caffe')
@@ -121,8 +117,12 @@ LOGFILE_FORMAT = "%(log_color)s%(levelname)-8s%(reset)s %(cyan)s%(filename)s:%(l
# -----------------------------------------------------------------------------
# S3 storage
# -----------------------------------------------------------------------------
-S3_MEDIA_ROOT = 's3://megapixels/v1/media/'
-S3_METADATA_ROOT = 's3://megapixels/v1/metadata/'
+S3_ROOT_URL = 's3://megapixels/v1/'
+S3_MEDIA_URL = join(S3_ROOT_URL, 'media')
+S3_METADATA_URL = join(S3_ROOT_URL, 'metadata')
+S3_HTTP_URL = 'https://megapixels.nyc3.digitaloceanspaces.com/v1/'
+S3_HTTP_MEDIA_URL = join(S3_HTTP_URL, 'media')
+S3_HTTP_METADATA_URL = join(S3_HTTP_URL, 'metadata')
# -----------------------------------------------------------------------------
# Static site generator
diff --git a/megapixels/app/settings/paths.py b/megapixels/app/settings/paths.py
deleted file mode 100644
index bc1333ba..00000000
--- a/megapixels/app/settings/paths.py
+++ /dev/null
@@ -1,163 +0,0 @@
-import os
-from os.path import join
-import logging
-
-from vframe.settings import vframe_cfg as vcfg
-from vframe.settings import types
-
-class Paths:
-
- # class properties
- MAPPINGS_DATE = vcfg.SUGARCUBE_DATES[0]
- DIR_APP_VFRAME = 'apps/vframe/'
- DIR_APP_SA = 'apps/syrianarchive'
- DIR_MODELS_VFRAME = join(DIR_APP_VFRAME, 'models')
- DIR_DARKNET = join(DIR_MODELS_VFRAME, 'darknet/pjreddie')
- DIR_DARKNET_VFRAME = join(DIR_MODELS_VFRAME, 'darknet/vframe')
- DIR_MEDIA = join(DIR_APP_SA, 'media')
- DIR_METADATA = join(DIR_APP_SA, 'metadata')
- DIR_RECORDS = join(DIR_APP_SA, 'records')
- DIR_REPORTS = join(DIR_APP_SA, 'reports')
-
-
- def __init__(self):
- pass
-
- @classmethod
- def DataStorePath(cls, data_store=types.DataStore.HDD):
- return '/data_store_{}'.format(data_store.name.lower())
-
- # -------------------------------------------------------------------------------
- # Darknet Paths
-
- @classmethod
- def darknet_classes(cls, data_store=types.DataStore.HDD, opt_net=types.DetectorNet.COCO):
- if opt_net == types.DetectorNet.COCO:
- fp = join(cls.DIR_DARKNET, 'data', 'coco.names')
- elif opt_net == types.DetectorNet.COCO_SPP:
- fp = join(cls.DIR_DARKNET, 'data', 'coco.names')
- elif opt_net == types.DetectorNet.VOC:
- fp = join(cls.DIR_DARKNET, 'data', 'voc.names')
- elif opt_net == types.DetectorNet.OPENIMAGES:
- fp = join(cls.DIR_DARKNET, 'data', 'openimages.names')
- elif opt_net == types.DetectorNet.SUBMUNITION:
- fp = join(cls.DIR_DARKNET_VFRAME, 'munitions_09b', 'classes.txt')
- return join(cls.DataStorePath(data_store), fp)
-
- @classmethod
- def darknet_data(cls, data_store=types.DataStore.HDD, opt_net=types.DetectorNet.COCO, as_bytes=True):
- if opt_net == types.DetectorNet.COCO:
- fp = join(cls.DIR_DARKNET, 'cfg', 'coco.data')
- elif opt_net == types.DetectorNet.COCO_SPP:
- fp = join(cls.DIR_DARKNET, 'cfg', 'coco.data')
- elif opt_net == types.DetectorNet.VOC:
- fp = join(cls.DIR_DARKNET, 'cfg', 'voc.data')
- elif opt_net == types.DetectorNet.OPENIMAGES:
- fp = join(cls.DIR_DARKNET, 'cfg', 'openimages.data')
- elif opt_net == types.DetectorNet.SUBMUNITION:
- fp = join(cls.DIR_DARKNET_VFRAME, 'munitions_09b', 'meta.data')
- fp = join(cls.DataStorePath(data_store), fp)
- if as_bytes:
- return bytes(fp, encoding="utf-8")
- else:
- return fp
-
-
- @classmethod
- def darknet_cfg(cls, data_store=types.DataStore.HDD, opt_net=types.DetectorNet.COCO, as_bytes=True):
- if opt_net == types.DetectorNet.COCO:
- fp = join(cls.DIR_DARKNET, 'cfg', 'yolov3.cfg')
- elif opt_net == types.DetectorNet.COCO_SPP:
- fp = join(cls.DIR_DARKNET, 'cfg', 'yolov3-spp.cfg')
- elif opt_net == types.DetectorNet.VOC:
- fp = join(cls.DIR_DARKNET, 'cfg', 'yolov3-voc.cfg')
- elif opt_net == types.DetectorNet.OPENIMAGES:
- fp = join(cls.DIR_DARKNET, 'cfg', 'yolov3-openimages.cfg')
- elif opt_net == types.DetectorNet.SUBMUNITION:
- fp = join(cls.DIR_DARKNET_VFRAME, 'munitions_09b', 'yolov3.cfg')
- fp = join(cls.DataStorePath(data_store), fp)
- if as_bytes:
- return bytes(fp, encoding="utf-8")
- else:
- return fp
-
- @classmethod
- def darknet_weights(cls, data_store=types.DataStore.HDD, opt_net=types.DetectorNet.COCO, as_bytes=True):
- if opt_net == types.DetectorNet.COCO:
- fp = join(cls.DIR_DARKNET, 'weights', 'yolov3.weights')
- elif opt_net == types.DetectorNet.COCO_SPP:
- fp = join(cls.DIR_DARKNET, 'weights', 'yolov3-spp.weights')
- elif opt_net == types.DetectorNet.VOC:
- fp = join(cls.DIR_DARKNET, 'weights', 'yolov3-voc.weights')
- elif opt_net == types.DetectorNet.OPENIMAGES:
- fp = join(cls.DIR_DARKNET, 'weights', 'yolov3-openimages.weights')
- elif opt_net == types.DetectorNet.SUBMUNITION:
- fp = join(cls.DIR_DARKNET_VFRAME, 'munitions_09b/weights', 'yolov3_40000.weights')
- fp = join(cls.DataStorePath(data_store), fp)
- if as_bytes:
- return bytes(fp, encoding="utf-8")
- else:
- return fp
-
- # -------------------------------------------------------------------------------
- # Metadata Paths
-
- @classmethod
- def mapping_index(cls, opt_date, data_store=types.DataStore.HDD, verified=types.Verified.VERIFIED,
- file_format=types.FileExt.PKL):
- """Returns filepath to a mapping file. Mapping files are the original Suguarcube mapping data"""
- fname = 'index.pkl' if file_format == types.FileExt.PKL else 'index.json'
- # data_store = 'data_store_{}'.format(data_store.name.lower())
- date_str = opt_date.name.lower()
- fp = join(cls.DataStorePath(data_store), cls.DIR_METADATA, 'mapping', date_str, verified.name.lower(), fname)
- return fp
-
- @classmethod
- def media_record_index(cls, data_store=types.DataStore.HDD, verified=types.Verified.VERIFIED,
- file_format=types.FileExt.PKL):
- """Returns filepath to a mapping file. Mapping files are the original Suguarcube mapping data"""
- fname = 'index.pkl' if file_format == types.FileExt.PKL else 'index.json'
- metadata_type = types.Metadata.MEDIA_RECORD.name.lower()
- fp = join(cls.DataStorePath(data_store), cls.DIR_METADATA, metadata_type, verified.name.lower(), fname)
- return fp
-
- @classmethod
- def metadata_index(cls, metadata_type, data_store=types.DataStore.HDD,
- verified=types.Verified.VERIFIED, file_format=types.FileExt.PKL):
- """Uses key from enum to get folder name and construct filepath"""
- fname = 'index.pkl' if file_format == types.FileExt.PKL else 'index.json'
- fp = join(cls.DataStorePath(data_store), cls.DIR_METADATA, metadata_type.name.lower(),
- verified.name.lower(), fname)
- return fp
-
- @classmethod
- def metadata_dir(cls, metadata_type, data_store=types.DataStore.HDD, verified=types.Verified.VERIFIED):
- """Uses key from enum to get folder name and construct filepath"""
- fp = join(cls.DataStorePath(data_store), cls.DIR_METADATA, metadata_type.name.lower(),
- verified.name.lower())
- return fp
-
- @classmethod
- def metadata_tree_dir(cls, metadata_type, data_store=types.DataStore.HDD):
- """Uses key from enum to get folder name and construct filepath"""
- fp = join(cls.DataStorePath(data_store), cls.DIR_METADATA, metadata_type.name.lower())
- return fp
-
- @classmethod
- def media_dir(cls, media_type, data_store=types.DataStore.HDD, verified=types.Verified.VERIFIED):
- """Returns the directory path to a media directory"""
- fp = join(cls.DataStorePath(data_store), cls.DIR_MEDIA, media_type.name.lower(), verified.name.lower())
- return fp
-
- # @classmethod
- # def keyframe(cls, dir_media, idx, image_size=types.ImageSize.MEDIUM):
- # """Returns path to keyframe image using supplied cls.media directory"""
- # idx = str(idx).zfill(vcfg.ZERO_PADDING)
- # size_label = vcfg.IMAGE_SIZE_LABELS[image_size]
- # fp = join(dir_media, sha256_tree, sha256, idx, size_label, 'index.jpg')
- # return fp
-
- @classmethod
- def dnn(cls):
- """Returns configurations for available DNNs"""
- pass \ No newline at end of file
diff --git a/megapixels/app/settings/types.py b/megapixels/app/settings/types.py
index 7157436d..685744aa 100644
--- a/megapixels/app/settings/types.py
+++ b/megapixels/app/settings/types.py
@@ -22,7 +22,15 @@ class CVTarget(Enum):
class HaarCascade(Enum):
FRONTAL, ALT, ALT2, PROFILE = range(4)
-
+
+# ---------------------------------------------------------------------
+# Storage
+# --------------------------------------------------------------------
+
+class DataStore(Enum):
+ """Storage devices. Paths are symlinked to root (eg /data_store_nas)"""
+ NAS, HDD, SSD, S3 = range(4)
+
# ---------------------------------------------------------------------
# Logger, monitoring
# --------------------------------------------------------------------
@@ -37,7 +45,39 @@ class LogLevel(Enum):
# --------------------------------------------------------------------
class Metadata(Enum):
- IDENTITIES, POSES, ROIS, FILE_META, SHAS, UUIDS, FACE_VECTORS = range(7)
+ IDENTITY, FILEPATH, SHA256, UUID, FACE_VECTOR, FACE_POSE, FACE_ROI = range(7)
class Dataset(Enum):
LFW, VGG_FACE2 = range(2)
+
+
+# ---------------------------------------------------------------------
+# Face analysis types
+# --------------------------------------------------------------------
+class FaceEmotion(Enum):
+ # Map these to text strings for web display
+ NEUTRAL, HAPPY, SAD, ANGRY, FRUSTRATED = range(5)
+
+class FaceBeauty(Enum):
+ # Map these to text strings for web display
+ AVERAGE, BELOW_AVERAGE, ABOVE_AVERAGE = range(3)
+
+class FaceYaw(Enum):
+ # Map these to text strings for web display
+ FAR_LEFT, LEFT, CENTER, RIGHT, FAR_RIGHT = range(5)
+
+class FacePitch(Enum):
+ # Map these to text strings for web display
+ FAR_DOWN, DOWN, CENTER, UP, FAR_UP = range(5)
+
+class FaceRoll(Enum):
+ # Map these to text strings for web display
+ FAR_DOWN, DOWN, CENTER, UP, FAR_UP = range(5)
+
+class FaceAge(Enum):
+ # Map these to text strings for web display
+ CHILD, TEENAGER, YOUNG_ADULT, ADULT, MATURE_ADULT, SENIOR = range(6)
+
+class Confidence(Enum):
+ # Map these to text strings for web display
+ VERY_LOW, LOW, MEDIUM, MEDIUM_HIGH, HIGH, VERY_HIGH = range(6) \ No newline at end of file
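
The new face-analysis enums are intended to be mapped to display strings, per their comments; a hypothetical sketch of what such a mapping could look like (not part of this commit, labels are assumed):

from app.settings import types

# assumed labels, keyed on the FaceAge enum added in types.py
FACE_AGE_LABELS = {
    types.FaceAge.CHILD: 'child',
    types.FaceAge.TEENAGER: 'teenager',
    types.FaceAge.YOUNG_ADULT: 'young adult',
    types.FaceAge.ADULT: 'adult',
    types.FaceAge.MATURE_ADULT: 'mature adult',
    types.FaceAge.SENIOR: 'senior',
}

def age_label(age_enum):
    # fall back to the lowercased enum name if no label is defined
    return FACE_AGE_LABELS.get(age_enum, age_enum.name.lower().replace('_', ' '))
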
diff --git a/megapixels/app/utils/path_utils.py b/megapixels/app/utils/path_utils.py
new file mode 100644
index 00000000..b0262ea0
--- /dev/null
+++ b/megapixels/app/utils/path_utils.py
@@ -0,0 +1,52 @@
+import os
+from os.path import join
+import logging
+
+from app.settings import app_cfg as cfg
+from app.settings import types
+
+
+# -------------------------------------------------------------------------
+# Metadata and media files
+# -------------------------------------------------------------------------
+
+class DataStore:
+ # local data store
+ def __init__(self, opt_data_store, opt_dataset):
+ self.data_store = join(f'/data_store_{opt_data_store.name.lower()}')
+ self.dir_dataset = join(self.data_store, 'datasets', cfg.DIR_PEOPLE, opt_dataset.name.lower())
+ self.dir_media = join(self.dir_dataset, 'media')
+ self.dir_metadata = join(self.dir_dataset, 'metadata')
+
+ def metadata(self, enum_type):
+ return join(self.dir_metadata, f'{enum_type.name.lower()}.csv')
+
+ def face_image(self, subdir, fn, ext):
+ return join(self.dir_media, 'original', subdir, f'{fn}.{ext}')
+
+ def face_image_crop(self, subdir, fn, ext):
+ return join(self.dir_media, 'cropped', subdir, f'{fn}.{ext}')
+
+
+class DataStoreS3:
+ # S3 server
+ def __init__(self, opt_dataset):
+ self._dir_media = join(cfg.S3_HTTP_MEDIA_URL, opt_dataset.name.lower())
+ self._dir_metadata = join(cfg.S3_HTTP_METADATA_URL, opt_dataset.name.lower())
+
+ def metadata(self, opt_metadata_type, ext='csv'):
+ return join(self._dir_metadata, f'{opt_metadata_type.name.lower()}.{ext}')
+
+ def face_image(self, opt_uuid, ext='jpg'):
+ #return join(self._dir_media, 'original', f'{opt_uuid}.{ext}')
+ return join(self._dir_media, f'{opt_uuid}.{ext}')
+
+ def face_image_crop(self, opt_uuid, ext='jpg'):
+ # not currently using?
+ return join(self._dir_media, 'cropped', f'{opt_uuid}.{ext}')
+
+
+
+# -------------------------------------------------------------------------
+# Models
+# ------------------------------------------------------------------------- \ No newline at end of file
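
For reference, a short sketch (not part of the commit) of the paths the new DataStore and DataStoreS3 helpers resolve to; the subdir, filename and uuid values below are hypothetical:

from app.settings import types
from app.utils.path_utils import DataStore, DataStoreS3

local = DataStore(types.DataStore.NAS, types.Dataset.LFW)
local.metadata(types.Metadata.FACE_VECTOR)
# -> /data_store_nas/datasets/people/lfw/metadata/face_vector.csv
local.face_image('00/01', 'image_0001', 'jpg')
# -> /data_store_nas/datasets/people/lfw/media/original/00/01/image_0001.jpg

remote = DataStoreS3(types.Dataset.LFW)
remote.face_image('0000-1111-2222')
# -> https://megapixels.nyc3.digitaloceanspaces.com/v1/media/lfw/0000-1111-2222.jpg
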