import os from os.path import join import logging import collections from dotenv import load_dotenv from app.settings import types from app.utils import click_utils from pathlib import Path import codecs codecs.register(lambda name: codecs.lookup('utf8') if name == 'utf8mb4' else None) # ----------------------------------------------------------------------------- # Enun lists used for custom Click Params # ----------------------------------------------------------------------------- LogLevelVar = click_utils.ParamVar(types.LogLevel) MetadataVar = click_utils.ParamVar(types.Metadata) DatasetVar = click_utils.ParamVar(types.Dataset) DataStoreVar = click_utils.ParamVar(types.DataStore) # Face analysis HaarCascadeVar = click_utils.ParamVar(types.HaarCascade) FaceDetectNetVar = click_utils.ParamVar(types.FaceDetectNet) FaceExtractorVar = click_utils.ParamVar(types.FaceExtractor) FaceLandmark2D_5Var = click_utils.ParamVar(types.FaceLandmark2D_5) FaceLandmark2D_68Var = click_utils.ParamVar(types.FaceLandmark2D_68) FaceLandmark3D_68Var = click_utils.ParamVar(types.FaceLandmark3D_68) # Person/Body detector BodyDetectNetVar = click_utils.ParamVar(types.BodyDetectNet) # base path DIR_SELF = os.path.dirname(os.path.realpath(__file__)) DIR_ROOT = Path(DIR_SELF).parent.parent.parent # # data_store DATA_STORE = '/data_store_hdd/' DATA_STORE_NAS = '/data_store_nas/' DATA_STORE_HDD = '/data_store_hdd/' DATA_STORE_SSD = '/data_store_ssd/' DIR_DATASETS = join(DATA_STORE,'datasets') DIR_DATSET_NAS = join(DIR_DATASETS, 'people') DIR_APPS = join(DATA_STORE,'apps') DIR_APP = join(DIR_APPS,'megapixels') DIR_MODELS = join(DIR_APP,'models') DIR_PEOPLE = 'people' # # Frameworks DIR_MODELS_CAFFE = join(DIR_MODELS,'caffe') DIR_MODELS_DARKNET = join(DIR_MODELS,'darknet') DIR_MODELS_DARKNET_PJREDDIE = join(DIR_MODELS_DARKNET, 'pjreddie') DIR_MODELS_PYTORCH = join(DIR_MODELS,'pytorch') DIR_MODELS_TORCH = join(DIR_MODELS,'torch') DIR_MODELS_MXNET = join(DIR_MODELS,'mxnet') DIR_MODELS_KERAS = join(DIR_MODELS,'keras') DIR_MODELS_TF = join(DIR_MODELS,'tensorflow') DIR_MODELS_DLIB = join(DIR_MODELS,'dlib') DIR_MODELS_DLIB_CNN = join(DIR_MODELS_DLIB, 'mmod_human_face_detector.dat') DIR_MODELS_DLIB_5PT = join(DIR_MODELS_DLIB, 'shape_predictor_5_face_landmarks.dat') DIR_MODELS_DLIB_68PT = join(DIR_MODELS_DLIB, 'shape_predictor_68_face_landmarks.dat') DIR_MODELS_DLIB_FACEREC_RESNET = join(DIR_MODELS_DLIB, 'dlib_face_recognition_resnet_model_v1.dat') DIR_FAISS = join(DIR_APP, 'faiss') DIR_FAISS_INDEXES = join(DIR_FAISS, 'indexes') DIR_FAISS_METADATA = join(DIR_FAISS, 'metadata') DIR_FAISS_RECIPES = join(DIR_FAISS, 'recipes') # Test images DIR_TEST_IMAGES = join(DIR_APP, 'test', 'images') # ----------------------------------------------------------------------------- # .env config for keys # ----------------------------------------------------------------------------- FP_KNOWLEDGE_GRAPH_ENV = join(DIR_ROOT, 'env/google_knowledge_graph_api.env') # DIR_DOTENV = join(DIR_APP, '.env') load_dotenv() # dotenv_path=DIR_DOTENV) # ----------------------------------------------------------------------------- # Drawing, GUI settings # ----------------------------------------------------------------------------- DIR_ASSETS = join(DIR_APP, 'assets') FP_FONT = join(DIR_ASSETS, 'font') # ----------------------------------------------------------------------------- # click chair settings # ----------------------------------------------------------------------------- DIR_COMMANDS_PROC = 'commands/processor' DIR_COMMANDS_VIZ = 'commands/visualize' DIR_COMMANDS_ADMIN = 'commands/admin' DIR_COMMANDS_DATASETS = 'commands/datasets' DIR_COMMANDS_FAISS = 'commands/faiss' DIR_COMMANDS_MISC = 'commands/misc' DIR_COMMANDS_SITE = 'commands/site' DIR_COMMANDS_DEMO = 'commands/demo' DIR_COMMANDS_MSC = 'commands/msc' # ----------------------------------------------------------------------------- # Filesystem settings # hash trees enforce a maximum number of directories per directory # ----------------------------------------------------------------------------- ZERO_PADDING = 6 # padding for enumerated image filenames #FRAME_NAME_ZERO_PADDING = 6 # is this active?? CKPT_ZERO_PADDING = 9 HASH_TREE_DEPTH = 3 HASH_BRANCH_SIZE = 3 DLIB_FACEREC_JITTERS = 5 # number of face recognition jitters #DLIB_FACEREC_PADDING = 0.25 # default dlib FACEREC_PADDING = 0.3 # VGG FACE2 recommended DEFAULT_SIZE_FACE_DETECT = (480,480) DEFAULT_JITTER_AMT = 0.015 # used for OpenCV DNN face detector with VGG2 face feature extractor DEFAULT_NUM_JITTERS = 4 # used for smothing the facial feature extraction DEFAULT_FACE_PADDING_VGG_FACE2 = 0.3 POSE_MINMAX_YAW = (-25,25) POSE_MINMAX_ROLL = (-15,15) POSE_MINMAX_PITCH = (-10,10) POSE_MINMAX_YAW = (-40,40) POSE_MINMAX_ROLL = (-35,35) POSE_MINMAX_PITCH = (-25,25) # ----------------------------------------------------------------------------- # Pandas data # ----------------------------------------------------------------------------- FILE_RECORD_DTYPES = {'fn':str, 'subdir': str} # ----------------------------------------------------------------------------- # Logging options exposed for custom click Params # ----------------------------------------------------------------------------- LOGGER_NAME = 'app' LOGLEVELS = { types.LogLevel.DEBUG: logging.DEBUG, types.LogLevel.INFO: logging.INFO, types.LogLevel.WARN: logging.WARN, types.LogLevel.ERROR: logging.ERROR, types.LogLevel.CRITICAL: logging.CRITICAL } LOGLEVEL_OPT_DEFAULT = types.LogLevel.DEBUG.name #LOGFILE_FORMAT = "%(asctime)s: %(levelname)s: %(message)s" #LOGFILE_FORMAT = "%(levelname)s:%(name)s: %(message)s" #LOGFILE_FORMAT = "%(levelname)s: %(message)s" #LOGFILE_FORMAT = "%(filename)s:%(lineno)s %(funcName)s() %(message)s" # colored logs """ black, red, green, yellow, blue, purple, cyan and white. {color}, fg_{color}, bg_{color}: Foreground and background colors. bold, bold_{color}, fg_bold_{color}, bg_bold_{color}: Bold/bright colors. reset: Clear all formatting (both foreground and background colors). """ LOGFILE_FORMAT = "%(log_color)s%(levelname)-8s%(reset)s %(cyan)s%(filename)s:%(lineno)s:%(bold_cyan)s%(funcName)s() %(reset)s%(message)s" # ----------------------------------------------------------------------------- # S3 storage # ----------------------------------------------------------------------------- S3_ROOT_URL = 's3://megapixels/v1/' S3_MEDIA_URL = join(S3_ROOT_URL, 'media') S3_METADATA_URL = join(S3_ROOT_URL, 'metadata') S3_HTTP_URL = 'https://megapixels.nyc3.digitaloceanspaces.com/v1/' S3_HTTP_MEDIA_URL = join(S3_HTTP_URL, 'media') S3_HTTP_METADATA_URL = join(S3_HTTP_URL, 'metadata') # ----------------------------------------------------------------------------- # Static site generator # ----------------------------------------------------------------------------- S3_SITE_PATH = "v1/site" S3_DATASETS_PATH = "v1" # datasets is already in the filename DIR_SITE_PUBLIC = "../site/public" DIR_SITE_CONTENT = "../site/content/pages" DIR_SITE_TEMPLATES = "../site/templates" DIR_SITE_INCLUDES = "../site/includes" DIR_SITE_USER_CONTENT = "../site/public/user_content" DIR_SITE_DATASETS = "../site/datasets/" DIR_SITE_FINAL_CITATIONS = "../site/datasets/final/" GOOGLE_ACCOUNT_CREDS_PATH = os.path.join("../", os.getenv("GOOGLE_ACCOUNT_CREDS_PATH")) # ----------------------------------------------------------------------------- # Celery # ----------------------------------------------------------------------------- CELERY_BROKER_URL = 'redis://localhost:6379/0' CELERY_RESULT_BACKEND = 'redis://localhost:6379/0' # ----------------------------------------------------------------------------- # Build settings # ----------------------------------------------------------------------------- BUILD_RESEARCH = False