summaryrefslogtreecommitdiff
path: root/megapixels/app/utils/identity_utils.py
diff options
context:
space:
mode:
Diffstat (limited to 'megapixels/app/utils/identity_utils.py')
-rw-r--r--megapixels/app/utils/identity_utils.py78
1 file changed, 69 insertions, 9 deletions
diff --git a/megapixels/app/utils/identity_utils.py b/megapixels/app/utils/identity_utils.py
index e090d16e..f9ed009e 100644
--- a/megapixels/app/utils/identity_utils.py
+++ b/megapixels/app/utils/identity_utils.py
@@ -5,22 +5,82 @@ import unidecode
import difflib
from app.settings import types
+from app.models.data_store import DataStore
from app.utils import logger_utils
log = logger_utils.Logger.getLogger()
+'''
+class Dataset(Enum):
+ LFW, VGG_FACE, VGG_FACE2, MSCELEB, UCCS, UMD_FACES, SCUT_FBP, UCF_SELFIE, UTK, \
+ CASIA_WEBFACE, AFW, PUBFIG83, HELEN, PIPA, MEGAFACE, BRAINWASH, IMDB_WIKI = range(16)
+'''
# Get list of names based on Dataset type
def get_names(opt_dataset, opt_data_store=types.DataStore.HDD):
    """Return identity names for a dataset.

    :param opt_dataset: types.Dataset enum member selecting the dataset
    :param opt_data_store: types.DataStore enum member selecting where the
        dataset is stored (defaults to HDD)
    :returns: dict with keys 'names_orig' (identity directory names exactly
        as found on disk) and 'names_query' (same names with underscores
        replaced by spaces, suitable for text search). Both lists are empty
        for datasets whose name listing is not yet implemented.
    """
    data_store = DataStore(opt_data_store, opt_dataset)
    dir_media_orig = data_store.dir_media_original

    # Default to empty lists so unimplemented datasets return an empty
    # result instead of raising NameError when building `result` below.
    names_orig = []
    names_query = []

    # Datasets recognized but whose name listing is not yet implemented:
    # AFW (Annotated Faces in the Wild), BRAINWASH (IP-cam),
    # CASIA_WEBFACE, HELEN, IMDB_WIKI (IMDb + Wikipedia),
    # LAG (Large Age Gap), MEGAFACE, MSCELEB (MS-Celeb-1M),
    # PIPA (People in Photo Albums), SCUT_FBP (Facial Beauty Perception),
    # UCCS (Unconstrained College Students), UMD_FACES (Univ. of Maryland),
    # UTK (Univ. of Tennessee Knoxville), UCF_SELFIE (Univ. of Central
    # Florida Selfie), VGG_FACE, VGG_FACE2 (Visual Geometry Group).
    not_implemented = (
        types.Dataset.AFW,
        types.Dataset.BRAINWASH,
        types.Dataset.CASIA_WEBFACE,
        types.Dataset.HELEN,
        types.Dataset.IMDB_WIKI,
        types.Dataset.LAG,
        types.Dataset.MEGAFACE,
        types.Dataset.MSCELEB,
        types.Dataset.PIPA,
        types.Dataset.SCUT_FBP,
        types.Dataset.UCCS,
        types.Dataset.UMD_FACES,
        types.Dataset.UTK,
        types.Dataset.UCF_SELFIE,
        types.Dataset.VGG_FACE,
        types.Dataset.VGG_FACE2,
    )

    if opt_dataset == types.Dataset.LFW:
        # Labeled Faces in The Wild: one directory per identity
        names_orig = list(os.listdir(dir_media_orig))
        names_query = [x.replace('_', ' ') for x in names_orig]
    elif opt_dataset == types.Dataset.PUBFIG83:
        # PubFig83: one directory per identity; skip stray .txt files.
        # NB: original used "is not '.txt'" — an identity check that is
        # wrong for string comparison; '!=' is the intended test.
        names_orig = [x for x in os.listdir(dir_media_orig)
                      if Path(x).suffix != '.txt']
        names_query = [x.replace('_', ' ') for x in names_orig]
    elif opt_dataset in not_implemented:
        # Known dataset, listing not implemented yet: return empty lists
        pass
    else:
        log.warn(f'{opt_dataset} not yet implemented')

    result = {'names_orig': names_orig, 'names_query': names_query}
    return result
def similarity(a, b):