Diffstat (limited to 'megapixels/app')
 megapixels/app/models/bbox.py                  |   6
 megapixels/app/processors/face_detector.py     |  48
 megapixels/app/processors/face_pose.py         | 148
 megapixels/app/processors/face_recognition.py  |  29
 megapixels/app/settings/app_cfg.py             |   7
 megapixels/app/settings/paths.py               | 163
 megapixels/app/settings/types.py               |  12
 7 files changed, 302 insertions(+), 111 deletions(-)
diff --git a/megapixels/app/models/bbox.py b/megapixels/app/models/bbox.py
index e6da960e..55a92512 100644
--- a/megapixels/app/models/bbox.py
+++ b/megapixels/app/models/bbox.py
@@ -262,6 +262,12 @@ class BBox:
rect = cls.normalize(cls, rect, dim)
return cls(*rect)
+ def __str__(self):
+ return f'BBox: ({self._x1}, {self._y1}), ({self._x2}, {self._y2}), width: {self._width}, height: {self._height}'
+
+ def __repr__(self):
+ return self.__str__()
+
def str(self):
"""Return BBox as a string "x1, y1, x2, y2" """
return self.as_box()
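A quick sketch of what the new dunder methods change in practice; the coordinate values here are hypothetical, and width/height are assumed to be derived from the corner points:

    bbox = BBox(0.1, 0.2, 0.5, 0.6)   # BBox(*rect) takes x1, y1, x2, y2
    print(bbox)                       # e.g. BBox: (0.1, 0.2), (0.5, 0.6), width: ..., height: ...
    print([bbox])                     # containers now log readably via __repr__ too
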
diff --git a/megapixels/app/processors/face_detector.py b/megapixels/app/processors/face_detector.py
index 593e9feb..3a90c557 100644
--- a/megapixels/app/processors/face_detector.py
+++ b/megapixels/app/processors/face_detector.py
@@ -24,15 +24,15 @@ class DetectorMTCNN:
from mtcnn.mtcnn import MTCNN
self.detector = MTCNN()
- def detect(self, im, opt_size=(400,400), opt_conf_thresh=None, opt_pyramids=None, opt_largest=False):
+ def detect(self, im, size=(400,400), conf_thresh=None, pyramids=None, largest=False):
'''Detects face using MTCNN and returns (list) of BBox
:param im: (numpy.ndarray) image
:returns list of BBox
'''
bboxes = []
- #conf_thresh = self.conf_thresh if opt_conf_thresh is None else opt_conf_thresh
- #pyramids = self.pyramids if opt_pyramids is None else opt_pyramids
- dnn_size = self.dnn_size if opt_size is None else opt_size
+ #conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
+ #pyramids = self.pyramids if pyramids is None else pyramids
+ dnn_size = self.dnn_size if size is None else size
im = im_utils.resize(im, width=dnn_size[0], height=dnn_size[1])
dim = im.shape[:2][::-1]
@@ -43,7 +43,7 @@ class DetectorMTCNN:
bbox = BBox.from_xywh_dim(*rect, dim)
bboxes.append(bbox)
- if opt_largest and len(bboxes) > 1:
+ if largest and len(bboxes) > 1:
# only keep largest
bboxes.sort(key=operator.attrgetter('area'), reverse=True)
bboxes = [bboxes[0]]
@@ -70,34 +70,33 @@ class DetectorDLIBCNN:
pyramids = 0
conf_thresh = 0.85
- def __init__(self, opt_gpu=0):
+ def __init__(self, gpu=0):
import dlib
self.log = logger_utils.Logger.getLogger()
cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', '')
- os.environ['CUDA_VISIBLE_DEVICES'] = str(opt_gpu)
+ os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
self.log.info('load model: {}'.format(cfg.DIR_MODELS_DLIB_CNN))
self.detector = dlib.cnn_face_detection_model_v1(cfg.DIR_MODELS_DLIB_CNN)
os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices # reset
- def detect(self, im, opt_size=None, opt_conf_thresh=None, opt_pyramids=None, opt_largest=False):
+ def detect(self, im, size=None, conf_thresh=None, pyramids=None, largest=False):
bboxes = []
- conf_thresh = self.conf_thresh if opt_conf_thresh is None else opt_conf_thresh
- pyramids = self.pyramids if opt_pyramids is None else opt_pyramids
- dnn_size = self.dnn_size if opt_size is None else opt_size
+ conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
+ pyramids = self.pyramids if pyramids is None else pyramids
+ dnn_size = self.dnn_size if size is None else size
# resize image
im = im_utils.resize(im, width=dnn_size[0], height=dnn_size[1])
dim = im.shape[:2][::-1]
im = im_utils.bgr2rgb(im) # convert to RGB for dlib
# run detector
- mmod_rects = self.detector(im, opt_pyramids)
+ mmod_rects = self.detector(im, pyramids)
# sort results
for mmod_rect in mmod_rects:
- self.log.debug('conf: {}, this: {}'.format(conf_thresh, mmod_rect.confidence))
if mmod_rect.confidence > conf_thresh:
bbox = BBox.from_dlib_dim(mmod_rect.rect, dim)
bboxes.append(bbox)
- if opt_largest and len(bboxes) > 1:
+ if largest and len(bboxes) > 1:
# only keep largest
bboxes.sort(key=operator.attrgetter('area'), reverse=True)
bboxes = [bboxes[0]]
@@ -116,25 +115,24 @@ class DetectorDLIBHOG:
self.log = logger_utils.Logger.getLogger()
self.detector = dlib.get_frontal_face_detector()
- def detect(self, im, opt_size=None, opt_conf_thresh=None, opt_pyramids=0, opt_largest=False):
- conf_thresh = self.conf_thresh if opt_conf_thresh is None else opt_conf_thresh
- dnn_size = self.size if opt_size is None else opt_size
- pyramids = self.pyramids if opt_pyramids is None else opt_pyramids
+ def detect(self, im, size=None, conf_thresh=None, pyramids=None, largest=False):
+ conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
+ dnn_size = self.size if size is None else size
+ pyramids = self.pyramids if pyramids is None else pyramids
- im = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ im = im_utils.resize(im, width=dnn_size[0], height=dnn_size[1])
dim = im.shape[:2][::-1]
im = im_utils.bgr2rgb(im) # convert to RGB for dlib
hog_results = self.detector.run(im, pyramids)
bboxes = []
if len(hog_results[0]) > 0:
- self.log.debug(hog_results)
for rect, score, direction in zip(*hog_results):
if score > conf_thresh:
bbox = BBox.from_dlib_dim(rect, dim)
bboxes.append(bbox)
- if opt_largest and len(bboxes) > 1:
+ if largest and len(bboxes) > 1:
# only keep largest
bboxes.sort(key=operator.attrgetter('area'), reverse=True)
bboxes = [bboxes[0]]
@@ -157,10 +155,10 @@ class DetectorCVDNN:
self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
- def detect(self, im, opt_size=None, opt_conf_thresh=None, opt_largest=False, opt_pyramids=None):
+ def detect(self, im, size=None, conf_thresh=None, largest=False, pyramids=None):
"""Detects faces and returns (list) of (BBox)"""
- conf_thresh = self.conf_thresh if opt_conf_thresh is None else opt_conf_thresh
- dnn_size = self.size if opt_size is None else opt_size
+ conf_thresh = self.conf_thresh if conf_thresh is None else conf_thresh
+ dnn_size = self.size if size is None else size
im = cv.resize(im, dnn_size)
blob = cv.dnn.blobFromImage(im, self.dnn_scale, dnn_size, self.dnn_mean)
self.net.setInput(blob)
@@ -173,7 +171,7 @@ class DetectorCVDNN:
rect_norm = net_outputs[0, 0, i, 3:7]
bboxes.append(BBox(*rect_norm))
- if opt_largest and len(bboxes) > 1:
+ if largest and len(bboxes) > 1:
# only keep largest
bboxes.sort(key=operator.attrgetter('area'), reverse=True)
bboxes = [bboxes[0]]
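For reference, a hedged usage sketch of the renamed keyword arguments (the opt_ prefixes are gone across all four detectors); the image path is hypothetical:

    import cv2 as cv
    from app.processors.face_detector import DetectorDLIBCNN

    im = cv.imread('face.jpg')            # BGR ndarray; detect() handles RGB conversion
    detector = DetectorDLIBCNN(gpu=0)
    bboxes = detector.detect(im, conf_thresh=0.85, pyramids=0, largest=True)
    for bbox in bboxes:
        print(bbox)                       # rendered via the new BBox.__str__
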
diff --git a/megapixels/app/processors/face_pose.py b/megapixels/app/processors/face_pose.py
index 67ac685d..f2548b32 100644
--- a/megapixels/app/processors/face_pose.py
+++ b/megapixels/app/processors/face_pose.py
@@ -22,89 +22,83 @@ class FacePoseDLIB:
def __init__(self):
pass
- def pose(self, landmarks, dim):
- '''Calculates pose
- '''
- degrees = compute_pose_degrees(landmarks, dim)
- return degrees
+ def pose(self, landmarks, dim, project_points=False):
+ # computes pose using 6 / 68 points from dlib face landmarks
+ # based on learnopencv.com and
+ # https://github.com/jerryhouuu/Face-Yaw-Roll-Pitch-from-Pose-Estimation-using-OpenCV/
+ # NB: not as accurate as MTCNN, see @jerryhouuu for ideas
+
+ pose_points_idx = (30, 8, 36, 45, 48, 54)
+ axis = np.float32([[500,0,0], [0,500,0], [0,0,500]])
+
+ # 3D model points.
+ model_points = np.array([
+ (0.0, 0.0, 0.0), # Nose tip
+ (0.0, -330.0, -65.0), # Chin
+ (-225.0, 170.0, -135.0), # Left eye left corner
+ (225.0, 170.0, -135.0), # Right eye right corner
+ (-150.0, -150.0, -125.0), # Left mouth corner
+ (150.0, -150.0, -125.0) # Right mouth corner
+ ])
+
+ # Assuming no lens distortion
+ dist_coeffs = np.zeros((4,1))
-# -----------------------------------------------------------
-# utilities
-# -----------------------------------------------------------
+ # find 6 pose points
+ pose_points = []
+ for j, idx in enumerate(pose_points_idx):
+ pt = landmarks[idx]
+ pose_points.append((pt[0], pt[1]))
+ pose_points = np.array(pose_points, dtype='double') # convert to double
+
+ # create camera matrix
+ focal_length = dim[0]
+ center = (dim[0]/2, dim[1]/2)
+ cam_mat = np.array(
+ [[focal_length, 0, center[0]],
+ [0, focal_length, center[1]],
+ [0, 0, 1]], dtype="double")
+
+ # solve PnP for rotation and translation
+ (success, rot_vec, tran_vec) = cv.solvePnP(model_points, pose_points,
+ cam_mat, dist_coeffs,
+ flags=cv.SOLVEPNP_ITERATIVE)
-def compute_pose_degrees(landmarks, dim):
- # computes pose using 6 / 68 points from dlib face landmarks
- # based on learnopencv.com and
- # https://github.com/jerryhouuu/Face-Yaw-Roll-Pitch-from-Pose-Estimation-using-OpenCV/
- # NB: not as accurate as MTCNN, see @jerryhouuu for ideas
-
- pose_points_idx = (30, 8, 36, 45, 48, 54)
- axis = np.float32([[500,0,0], [0,500,0], [0,0,500]])
-
- # 3D model points.
- model_points = np.array([
- (0.0, 0.0, 0.0), # Nose tip
- (0.0, -330.0, -65.0), # Chin
- (-225.0, 170.0, -135.0), # Left eye left corner
- (225.0, 170.0, -135.0), # Right eye right corne
- (-150.0, -150.0, -125.0), # Left Mouth corner
- (150.0, -150.0, -125.0) # Right mouth corner
- ])
-
- # Assuming no lens distortion
- dist_coeffs = np.zeros((4,1))
+ result = {}
- # find 6 pose points
- pose_points = []
- for j, idx in enumerate(pose_points_idx):
- pt = landmarks[idx]
- pose_points.append((pt[0], pt[1]))
- pose_points = np.array(pose_points, dtype='double') # convert to double
-
- # create camera matrix
- focal_length = dim[0]
- center = (dim[0]/2, dim[1]/2)
- cam_mat = np.array(
- [[focal_length, 0, center[0]],
- [0, focal_length, center[1]],
- [0, 1, 1]], dtype = "double")
-
- # solve PnP for rotation and translation
- (success, rot_vec, tran_vec) = cv.solvePnP(model_points, pose_points,
- cam_mat, dist_coeffs,
- flags=cv.SOLVEPNP_ITERATIVE)
+ # project points
+ if project_points:
+ pts_im, jac = cv.projectPoints(axis, rot_vec, tran_vec, cam_mat, dist_coeffs)
+ pts_model, jac2 = cv.projectPoints(model_points, rot_vec, tran_vec, cam_mat, dist_coeffs)
+ result['points_model'] = pts_model
+ result['points_image'] = pts_im
+ result['point_nose'] = tuple(landmarks[pose_points_idx[0]])
- # project points
- #pts_im, jac = cv.projectPoints(axis, rot_vec, tran_vec, cam_mat, dist_coeffs)
- #pts_model, jac2 = cv.projectPoints(model_points, rot_vec, tran_vec, cam_mat, dist_coeffs)
- rvec_matrix = cv.Rodrigues(rot_vec)[0]
-
- # convert to degrees
- proj_matrix = np.hstack((rvec_matrix, tran_vec))
- eulerAngles = cv.decomposeProjectionMatrix(proj_matrix)[6]
- pitch, yaw, roll = [math.radians(x) for x in eulerAngles]
- pitch = math.degrees(math.asin(math.sin(pitch)))
- roll = -math.degrees(math.asin(math.sin(roll)))
- yaw = math.degrees(math.asin(math.sin(yaw)))
- degrees = {'pitch': pitch, 'roll': roll, 'yaw': yaw}
-
- # add nose point
- #pt_nose = tuple(landmarks[pose_points_idx[0]])
- return degrees
- #return pts_im, pts_model, degrees, pt_nose
+ rvec_matrix = cv.Rodrigues(rot_vec)[0]
+
+ # convert to degrees
+ proj_matrix = np.hstack((rvec_matrix, tran_vec))
+ eulerAngles = cv.decomposeProjectionMatrix(proj_matrix)[6]
+ pitch, yaw, roll = [math.radians(x) for x in eulerAngles]
+ pitch = math.degrees(math.asin(math.sin(pitch)))
+ roll = -math.degrees(math.asin(math.sin(roll)))
+ yaw = math.degrees(math.asin(math.sin(yaw)))
+ degrees = {'pitch': pitch, 'roll': roll, 'yaw': yaw}
+ result['degrees'] = degrees
+ return result
-def draw_pose(im, pts_im, pts_model, pt_nose):
- cv.line(im, pt_nose, tuple(pts_im[1].ravel()), (0,255,0), 3) #GREEN
- cv.line(im, pt_nose, tuple(pts_im[0].ravel()), (255,0,), 3) #BLUE
- cv.line(im, pt_nose, tuple(pts_im[2].ravel()), (0,0,255), 3) #RED
- return im
+ def draw_pose(self, im, pts_im, pts_model, pt_nose):
+ cv.line(im, pt_nose, tuple(pts_im[1].ravel()), (0,255,0), 3) # GREEN: y-axis
+ cv.line(im, pt_nose, tuple(pts_im[0].ravel()), (255,0,0), 3) # BLUE: x-axis
+ cv.line(im, pt_nose, tuple(pts_im[2].ravel()), (0,0,255), 3) # RED: z-axis
+ return im
-def draw_degrees(im, degrees, color=(0,255,0)):
- for i, item in enumerate(degrees.items()):
- k, v = item
- t = '{}: {:.2f}'.format(k, v)
- origin = (10, 30 + (25 * i))
- cv.putText(im, t, origin, cv.FONT_HERSHEY_SIMPLEX, 0.5, color, thickness=2, lineType=2) \ No newline at end of file
+
+ def draw_degrees(self, im, degrees, color=(0,255,0)):
+ for i, item in enumerate(degrees.items()):
+ k, v = item
+ t = '{}: {:.2f}'.format(k, v)
+ origin = (10, 30 + (25 * i))
+ cv.putText(im, t, origin, cv.FONT_HERSHEY_SIMPLEX, 0.5, color, thickness=2, lineType=cv.LINE_AA) \ No newline at end of file
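A sketch of the reworked pose() contract, assuming landmarks is the usual 68-point dlib list of (x, y) tuples and dim is (width, height); the points_* and point_nose keys exist only when project_points=True:

    from app.processors.face_pose import FacePoseDLIB

    fp = FacePoseDLIB()
    result = fp.pose(landmarks, dim, project_points=True)
    print(result['degrees'])              # {'pitch': ..., 'roll': ..., 'yaw': ...}
    fp.draw_pose(im, result['points_image'], result['points_model'], result['point_nose'])
    fp.draw_degrees(im, result['degrees'])  # both draw on im in place
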
diff --git a/megapixels/app/processors/face_recognition.py b/megapixels/app/processors/face_recognition.py
index 9c3a301d..e0b9f752 100644
--- a/megapixels/app/processors/face_recognition.py
+++ b/megapixels/app/processors/face_recognition.py
@@ -17,25 +17,38 @@ class RecognitionDLIB:
# https://github.com/davisking/dlib/blob/master/python_examples/face_recognition.py
# facerec.compute_face_descriptor(img, shape, 100, 0.25)
- def __init__(self, opt_gpu=0):
+ def __init__(self, gpu=0):
self.log = logger_utils.Logger.getLogger()
- if opt_gpu > 0:
+
+ if gpu > -1:
cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', '')
- os.environ['CUDA_VISIBLE_DEVICES'] = str(opt_gpu)
- self.predictor = dlib.shape_predictor(cfg.DIR_MODELS_DLIB_5PT)
- self.facerec = dlib.face_recognition_model_v1(cfg.DIR_MODELS_DLIB_FACEREC_RESNET)
+ os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
+
+ self.predictor = dlib.shape_predictor(cfg.DIR_MODELS_DLIB_5PT)
+ self.facerec = dlib.face_recognition_model_v1(cfg.DIR_MODELS_DLIB_FACEREC_RESNET)
+
+ if gpu > -1:
os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices # reset GPU env
+
def vec(self, im, bbox, width=100,
- jitters=cfg.DLIB_FACEREC_JITTERS, padding=cfg.DLIB_FACEREC_PADDING):
+ jitters=cfg.DLIB_FACEREC_JITTERS, padding=cfg.DLIB_FACEREC_PADDING):
# Converts image and bbox into 128d vector
# scale the image so the face is always 100x100 pixels
+ #self.log.debug('compute scale')
scale = width / bbox.width
- im = cv.resize(im, (scale, scale), interploation=cv.INTER_LANCZOS4)
+ #im = cv.resize(im, (scale, scale), cv.INTER_LANCZOS4)
+ #self.log.debug('resize')
+ # resize disabled: the return value was being discarded, and assigning it
+ # would desync im from bbox, which is still in original-image coordinates
+ #im = cv.resize(im, None, fx=scale, fy=scale, interpolation=cv.INTER_LANCZOS4)
+ #self.log.debug('to dlib')
bbox_dlib = bbox.to_dlib()
+ #self.log.debug('predictor')
face_shape = self.predictor(im, bbox_dlib)
- vec = self.facerec.compute_face_descriptor(im, face_shape, jitters, padding)
+ # vec = self.facerec.compute_face_descriptor(im, face_shape, jitters, padding)
+ #self.log.debug('vec')
+ vec = self.facerec.compute_face_descriptor(im, face_shape, jitters)
+ #vec = self.facerec.compute_face_descriptor(im, face_shape)
return vec
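A hedged sketch of the recognizer after the refactor; im and bbox are assumed to come from one of the detectors above. Note that gpu=-1 now skips the CUDA_VISIBLE_DEVICES juggling entirely, and the padding parameter is currently unused since compute_face_descriptor is called with jitters only:

    from app.processors.face_recognition import RecognitionDLIB

    rec = RecognitionDLIB(gpu=0)
    vec = rec.vec(im, bbox)               # 128-D dlib face descriptor
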
diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py
index 51392bcc..0507366f 100644
--- a/megapixels/app/settings/app_cfg.py
+++ b/megapixels/app/settings/app_cfg.py
@@ -9,6 +9,10 @@ import cv2 as cv
from app.settings import types
from app.utils import click_utils
+# -----------------------------------------------------------------------------
+# Metadata type names
+# -----------------------------------------------------------------------------
+
# -----------------------------------------------------------------------------
# Enum lists used for custom Click Params
@@ -17,6 +21,8 @@ from app.utils import click_utils
FaceDetectNetVar = click_utils.ParamVar(types.FaceDetectNet)
HaarCascadeVar = click_utils.ParamVar(types.HaarCascade)
LogLevelVar = click_utils.ParamVar(types.LogLevel)
+MetadataVar = click_utils.ParamVar(types.Metadata)
+DatasetVar = click_utils.ParamVar(types.Dataset)
# # data_store
DATA_STORE = '/data_store_hdd/'
@@ -24,6 +30,7 @@ DATA_STORE_NAS = '/data_store_nas/'
DATA_STORE_HDD = '/data_store_hdd/'
DATA_STORE_SSD = '/data_store_ssd/'
DIR_DATASETS = join(DATA_STORE,'datasets')
+DIR_DATASET_NAS = join(DIR_DATASETS, 'people')
DIR_APPS = join(DATA_STORE,'apps')
DIR_APP = join(DIR_APPS,'megapixels')
DIR_MODELS = join(DIR_APP,'models')
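A sketch of how the new enum params might plug into a Click command; click_utils.ParamVar is assumed to behave as a click.ParamType that maps option strings onto enum members (its implementation is outside this diff):

    import click
    from app.settings import app_cfg as cfg

    @click.command()
    @click.option('--metadata', 'opt_metadata', type=cfg.MetadataVar)
    @click.option('--dataset', 'opt_dataset', type=cfg.DatasetVar)
    def cli(opt_metadata, opt_dataset):
        print(opt_metadata, opt_dataset)  # expected: types.Metadata / types.Dataset members
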
diff --git a/megapixels/app/settings/paths.py b/megapixels/app/settings/paths.py
new file mode 100644
index 00000000..bc1333ba
--- /dev/null
+++ b/megapixels/app/settings/paths.py
@@ -0,0 +1,163 @@
+import os
+from os.path import join
+import logging
+
+from vframe.settings import vframe_cfg as vcfg
+from vframe.settings import types
+
+class Paths:
+
+ # class properties
+ MAPPINGS_DATE = vcfg.SUGARCUBE_DATES[0]
+ DIR_APP_VFRAME = 'apps/vframe/'
+ DIR_APP_SA = 'apps/syrianarchive'
+ DIR_MODELS_VFRAME = join(DIR_APP_VFRAME, 'models')
+ DIR_DARKNET = join(DIR_MODELS_VFRAME, 'darknet/pjreddie')
+ DIR_DARKNET_VFRAME = join(DIR_MODELS_VFRAME, 'darknet/vframe')
+ DIR_MEDIA = join(DIR_APP_SA, 'media')
+ DIR_METADATA = join(DIR_APP_SA, 'metadata')
+ DIR_RECORDS = join(DIR_APP_SA, 'records')
+ DIR_REPORTS = join(DIR_APP_SA, 'reports')
+
+
+ def __init__(self):
+ pass
+
+ @classmethod
+ def DataStorePath(cls, data_store=types.DataStore.HDD):
+ return '/data_store_{}'.format(data_store.name.lower())
+
+ # -------------------------------------------------------------------------------
+ # Darknet Paths
+
+ @classmethod
+ def darknet_classes(cls, data_store=types.DataStore.HDD, opt_net=types.DetectorNet.COCO):
+ if opt_net == types.DetectorNet.COCO:
+ fp = join(cls.DIR_DARKNET, 'data', 'coco.names')
+ elif opt_net == types.DetectorNet.COCO_SPP:
+ fp = join(cls.DIR_DARKNET, 'data', 'coco.names')
+ elif opt_net == types.DetectorNet.VOC:
+ fp = join(cls.DIR_DARKNET, 'data', 'voc.names')
+ elif opt_net == types.DetectorNet.OPENIMAGES:
+ fp = join(cls.DIR_DARKNET, 'data', 'openimages.names')
+ elif opt_net == types.DetectorNet.SUBMUNITION:
+ fp = join(cls.DIR_DARKNET_VFRAME, 'munitions_09b', 'classes.txt')
+ return join(cls.DataStorePath(data_store), fp)
+
+ @classmethod
+ def darknet_data(cls, data_store=types.DataStore.HDD, opt_net=types.DetectorNet.COCO, as_bytes=True):
+ if opt_net == types.DetectorNet.COCO:
+ fp = join(cls.DIR_DARKNET, 'cfg', 'coco.data')
+ elif opt_net == types.DetectorNet.COCO_SPP:
+ fp = join(cls.DIR_DARKNET, 'cfg', 'coco.data')
+ elif opt_net == types.DetectorNet.VOC:
+ fp = join(cls.DIR_DARKNET, 'cfg', 'voc.data')
+ elif opt_net == types.DetectorNet.OPENIMAGES:
+ fp = join(cls.DIR_DARKNET, 'cfg', 'openimages.data')
+ elif opt_net == types.DetectorNet.SUBMUNITION:
+ fp = join(cls.DIR_DARKNET_VFRAME, 'munitions_09b', 'meta.data')
+ fp = join(cls.DataStorePath(data_store), fp)
+ if as_bytes:
+ return bytes(fp, encoding="utf-8")
+ else:
+ return fp
+
+
+ @classmethod
+ def darknet_cfg(cls, data_store=types.DataStore.HDD, opt_net=types.DetectorNet.COCO, as_bytes=True):
+ if opt_net == types.DetectorNet.COCO:
+ fp = join(cls.DIR_DARKNET, 'cfg', 'yolov3.cfg')
+ elif opt_net == types.DetectorNet.COCO_SPP:
+ fp = join(cls.DIR_DARKNET, 'cfg', 'yolov3-spp.cfg')
+ elif opt_net == types.DetectorNet.VOC:
+ fp = join(cls.DIR_DARKNET, 'cfg', 'yolov3-voc.cfg')
+ elif opt_net == types.DetectorNet.OPENIMAGES:
+ fp = join(cls.DIR_DARKNET, 'cfg', 'yolov3-openimages.cfg')
+ elif opt_net == types.DetectorNet.SUBMUNITION:
+ fp = join(cls.DIR_DARKNET_VFRAME, 'munitions_09b', 'yolov3.cfg')
+ fp = join(cls.DataStorePath(data_store), fp)
+ if as_bytes:
+ return bytes(fp, encoding="utf-8")
+ else:
+ return fp
+
+ @classmethod
+ def darknet_weights(cls, data_store=types.DataStore.HDD, opt_net=types.DetectorNet.COCO, as_bytes=True):
+ if opt_net == types.DetectorNet.COCO:
+ fp = join(cls.DIR_DARKNET, 'weights', 'yolov3.weights')
+ elif opt_net == types.DetectorNet.COCO_SPP:
+ fp = join(cls.DIR_DARKNET, 'weights', 'yolov3-spp.weights')
+ elif opt_net == types.DetectorNet.VOC:
+ fp = join(cls.DIR_DARKNET, 'weights', 'yolov3-voc.weights')
+ elif opt_net == types.DetectorNet.OPENIMAGES:
+ fp = join(cls.DIR_DARKNET, 'weights', 'yolov3-openimages.weights')
+ elif opt_net == types.DetectorNet.SUBMUNITION:
+ fp = join(cls.DIR_DARKNET_VFRAME, 'munitions_09b/weights', 'yolov3_40000.weights')
+ fp = join(cls.DataStorePath(data_store), fp)
+ if as_bytes:
+ return bytes(fp, encoding="utf-8")
+ else:
+ return fp
+
+ # -------------------------------------------------------------------------------
+ # Metadata Paths
+
+ @classmethod
+ def mapping_index(cls, opt_date, data_store=types.DataStore.HDD, verified=types.Verified.VERIFIED,
+ file_format=types.FileExt.PKL):
+ """Returns filepath to a mapping file. Mapping files are the original Suguarcube mapping data"""
+ fname = 'index.pkl' if file_format == types.FileExt.PKL else 'index.json'
+ # data_store = 'data_store_{}'.format(data_store.name.lower())
+ date_str = opt_date.name.lower()
+ fp = join(cls.DataStorePath(data_store), cls.DIR_METADATA, 'mapping', date_str, verified.name.lower(), fname)
+ return fp
+
+ @classmethod
+ def media_record_index(cls, data_store=types.DataStore.HDD, verified=types.Verified.VERIFIED,
+ file_format=types.FileExt.PKL):
+ """Returns filepath to a mapping file. Mapping files are the original Suguarcube mapping data"""
+ fname = 'index.pkl' if file_format == types.FileExt.PKL else 'index.json'
+ metadata_type = types.Metadata.MEDIA_RECORD.name.lower()
+ fp = join(cls.DataStorePath(data_store), cls.DIR_METADATA, metadata_type, verified.name.lower(), fname)
+ return fp
+
+ @classmethod
+ def metadata_index(cls, metadata_type, data_store=types.DataStore.HDD,
+ verified=types.Verified.VERIFIED, file_format=types.FileExt.PKL):
+ """Uses key from enum to get folder name and construct filepath"""
+ fname = 'index.pkl' if file_format == types.FileExt.PKL else 'index.json'
+ fp = join(cls.DataStorePath(data_store), cls.DIR_METADATA, metadata_type.name.lower(),
+ verified.name.lower(), fname)
+ return fp
+
+ @classmethod
+ def metadata_dir(cls, metadata_type, data_store=types.DataStore.HDD, verified=types.Verified.VERIFIED):
+ """Uses key from enum to get folder name and construct filepath"""
+ fp = join(cls.DataStorePath(data_store), cls.DIR_METADATA, metadata_type.name.lower(),
+ verified.name.lower())
+ return fp
+
+ @classmethod
+ def metadata_tree_dir(cls, metadata_type, data_store=types.DataStore.HDD):
+ """Uses key from enum to get folder name and construct filepath"""
+ fp = join(cls.DataStorePath(data_store), cls.DIR_METADATA, metadata_type.name.lower())
+ return fp
+
+ @classmethod
+ def media_dir(cls, media_type, data_store=types.DataStore.HDD, verified=types.Verified.VERIFIED):
+ """Returns the directory path to a media directory"""
+ fp = join(cls.DataStorePath(data_store), cls.DIR_MEDIA, media_type.name.lower(), verified.name.lower())
+ return fp
+
+ # @classmethod
+ # def keyframe(cls, dir_media, idx, image_size=types.ImageSize.MEDIUM):
+ # """Returns path to keyframe image using supplied cls.media directory"""
+ # idx = str(idx).zfill(vcfg.ZERO_PADDING)
+ # size_label = vcfg.IMAGE_SIZE_LABELS[image_size]
+ # fp = join(dir_media, sha256_tree, sha256, idx, size_label, 'index.jpg')
+ # return fp
+
+ @classmethod
+ def dnn(cls):
+ """Returns configurations for available DNNs"""
+ pass \ No newline at end of file
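For orientation, a hedged sketch of the new Paths helpers; note the module imports types from vframe.settings, so the enums below are assumed to resolve there (the Metadata enum itself is added to app/settings/types.py further down):

    from vframe.settings import types
    from app.settings.paths import Paths

    # -> /data_store_ssd/apps/syrianarchive/metadata/poses/verified/index.pkl
    fp = Paths.metadata_index(types.Metadata.POSES, data_store=types.DataStore.SSD)

    # plain str (not bytes) path to the submunition YOLOv3 weights
    fp_w = Paths.darknet_weights(opt_net=types.DetectorNet.SUBMUNITION, as_bytes=False)
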
diff --git a/megapixels/app/settings/types.py b/megapixels/app/settings/types.py
index e9107803..7157436d 100644
--- a/megapixels/app/settings/types.py
+++ b/megapixels/app/settings/types.py
@@ -7,7 +7,6 @@ def find_type(name, enum_type):
return None
-
class FaceDetectNet(Enum):
"""Scene text detector networks"""
HAAR, DLIB_CNN, DLIB_HOG, CVDNN, MTCNN = range(5)
@@ -31,3 +30,14 @@ class HaarCascade(Enum):
class LogLevel(Enum):
"""Loger vebosity"""
DEBUG, INFO, WARN, ERROR, CRITICAL = range(5)
+
+
+# ---------------------------------------------------------------------
+# Metadata types
+# --------------------------------------------------------------------
+
+class Metadata(Enum):
+ IDENTITIES, POSES, ROIS, FILE_META, SHAS, UUIDS, FACE_VECTORS = range(7)
+
+class Dataset(Enum):
+ LFW, VGG_FACE2 = range(2)
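
The new enums follow the same range(n) pattern as the existing ones, so the lowercase-name convention used throughout paths.py falls out directly:

    from app.settings import types

    for ds in types.Dataset:
        print(ds.name.lower())            # 'lfw', 'vgg_face2'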