summaryrefslogtreecommitdiff
path: root/megapixels/commands
diff options
context:
space:
mode:
Diffstat (limited to 'megapixels/commands')
-rw-r--r--megapixels/commands/cv/face_roi.py4
-rw-r--r--megapixels/commands/cv/face_vector.py8
-rw-r--r--megapixels/commands/demo/face_age.py106
-rw-r--r--megapixels/commands/demo/face_beauty.py28
-rw-r--r--megapixels/commands/demo/face_detection.py2
-rw-r--r--megapixels/commands/demo/face_emotion.py106
-rw-r--r--megapixels/commands/demo/face_gender.py106
-rw-r--r--megapixels/commands/demo/face_vector.py7
8 files changed, 340 insertions, 27 deletions
diff --git a/megapixels/commands/cv/face_roi.py b/megapixels/commands/cv/face_roi.py
index 6d42924e..70fff401 100644
--- a/megapixels/commands/cv/face_roi.py
+++ b/megapixels/commands/cv/face_roi.py
@@ -94,11 +94,11 @@ def cli(ctx, opt_fp_in, opt_dir_media, opt_fp_out, opt_data_store, opt_dataset,
if opt_detector_type == types.FaceDetectNet.CVDNN:
detector = face_detector.DetectorCVDNN()
elif opt_detector_type == types.FaceDetectNet.DLIB_CNN:
- detector = face_detector.DetectorDLIBCNN(opt_gpu)
+ detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu)
elif opt_detector_type == types.FaceDetectNet.DLIB_HOG:
detector = face_detector.DetectorDLIBHOG()
elif opt_detector_type == types.FaceDetectNet.MTCNN:
- detector = face_detector.DetectorMTCNN()
+ detector = face_detector.DetectorMTCNN(gpu=opt_gpu)
elif opt_detector_type == types.FaceDetectNet.HAAR:
log.error('{} not yet implemented'.format(opt_detector_type.name))
return
diff --git a/megapixels/commands/cv/face_vector.py b/megapixels/commands/cv/face_vector.py
index 9251c053..4df647f5 100644
--- a/megapixels/commands/cv/face_vector.py
+++ b/megapixels/commands/cv/face_vector.py
@@ -13,7 +13,7 @@ from app.settings import app_cfg as cfg
help='Override enum output filename CSV')
@click.option('-m', '--media', 'opt_dir_media', default=None,
help='Override enum media directory')
-@click.option('--data_store', 'opt_data_store',
+@click.option('--store', 'opt_data_store',
type=cfg.DataStoreVar,
default=click_utils.get_default(types.DataStore.HDD),
show_default=True,
@@ -105,8 +105,10 @@ def cli(ctx, opt_fp_out, opt_dir_media, opt_data_store, opt_dataset, opt_size,
# compute vec
# padding=opt_padding not yet implemented in dlib===19.16 but merged in master
vec = facerec.vec(im, bbox_dim, jitters=opt_jitters)
- vec_str = ','.join([repr(x) for x in vec]) # convert to string for CSV
- vecs.append( {'roi_index': roi_index, 'record_index': record_index, 'vec': vec_str})
+ vec_flat = facerec.flatten(vec)
+ vec_flat['roi_index'] = roi_index
+ vec_flat['record_index'] = record_index
+ vecs.append(vec_flat)
# create DataFrame and save to CSV
diff --git a/megapixels/commands/demo/face_age.py b/megapixels/commands/demo/face_age.py
new file mode 100644
index 00000000..45ae5190
--- /dev/null
+++ b/megapixels/commands/demo/face_age.py
@@ -0,0 +1,106 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None, required=True,
+ help='Image filepath')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Image output path')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+ help='GPU index')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+ help='Display detections to debug')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
+ """Face detector demo"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ from tqdm import tqdm
+ import numpy as np
+ import pandas as pd
+ import cv2 as cv
+ import dlib
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.utils import plot_utils
+ from app.processors import face_detector, face_age
+ from app.models.data_store import DataStore
+
+
+ log = logger_utils.Logger.getLogger()
+
+
+ # -------------------------------------------------
+ # load image
+
+ im = cv.imread(opt_fp_in)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+
+ # ----------------------------------------------------------------------------
+ # detect face
+
+ face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ bboxes = face_detector.detect(im_resized, largest=True)
+ bbox = bboxes[0]
+ dim = im_resized.shape[:2][::-1]
+ bbox_dim = bbox.to_dim(dim)
+ if not bbox:
+ log.error('no face detected')
+ return
+ else:
+ log.info(f'face detected: {bbox_dim.to_xyxy()}')
+
+
+ # ----------------------------------------------------------------------------
+ # age
+
+ age_predictor = face_age.FaceAge(gpu=opt_gpu)
+ age_score = age_predictor.age(im_resized, bbox_dim)
+
+
+ # ----------------------------------------------------------------------------
+ # output
+
+ log.info(f'Face coords: {bbox_dim} face')
+ log.info(f'age score: {(100*age_score):.2f}')
+
+
+ # ----------------------------------------------------------------------------
+ # draw
+
+ # draw 2d landmarks
+ im_age = im_resized.copy()
+ draw_utils.draw_bbox(im_age, bbox_dim)
+ txt = f'age score: {(100*age_score):.2f}'
+ draw_utils.draw_text(im_age, bbox_dim.pt_tl, txt)
+
+
+ # ----------------------------------------------------------------------------
+ # save
+
+ if opt_fp_out:
+ # save annotated age image
+ cv.imwrite(opt_fp_out, im_age)
+
+
+ # ----------------------------------------------------------------------------
+ # display
+
+ if opt_display:
+ # show all images here
+ cv.imshow('age', im_age)
+ display_utils.handle_keyboard() \ No newline at end of file
diff --git a/megapixels/commands/demo/face_beauty.py b/megapixels/commands/demo/face_beauty.py
index b1612f7c..d31c5cee 100644
--- a/megapixels/commands/demo/face_beauty.py
+++ b/megapixels/commands/demo/face_beauty.py
@@ -1,6 +1,3 @@
-"""
-"""
-
import click
from app.settings import types
@@ -51,25 +48,23 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# load image
im = cv.imread(opt_fp_in)
- # im = cv.cvtColor(im, cv.COLOR_BGR2RGB)
- if im.shape[0] > 1280:
- new_shape = (1280, im.shape[1] * 1280 / im.shape[0])
- elif im.shape[1] > 1280:
- new_shape = (im.shape[0] * 1280 / im.shape[1], 1280)
- elif im.shape[0] < 640 or im.shape[1] < 640:
- new_shape = (im.shape[0] * 2, im.shape[1] * 2)
- else:
- new_shape = im.shape[0:2]
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
- im_resized = cv.resize(im, (int(new_shape[1]), int(new_shape[0])))
- #im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+ # TODO fix Keras CPU/GPU device selection issue
+ # NB: GPU visibility issues with dlib/keras
+ # Wrap this with cuda toggle and run before init dlib GPU
+
+ device_cur = os.getenv('CUDA_VISIBLE_DEVICES', '')
+ os.environ['CUDA_VISIBLE_DEVICES'] = ''
+ beauty_predictor = face_beauty.FaceBeauty()
+ os.environ['CUDA_VISIBLE_DEVICES'] = device_cur
# ----------------------------------------------------------------------------
# detect face
- face_detector = face_detector.DetectorDLIBCNN() # -1 for CPU
+ face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
bboxes = face_detector.detect(im_resized, largest=True)
bbox = bboxes[0]
dim = im_resized.shape[:2][::-1]
@@ -82,8 +77,7 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
# ----------------------------------------------------------------------------
# beauty
-
- beauty_predictor = face_beauty.FaceBeauty()
+
beauty_score = beauty_predictor.beauty(im_resized, bbox_dim)
diff --git a/megapixels/commands/demo/face_detection.py b/megapixels/commands/demo/face_detection.py
index fb23704b..488cc80d 100644
--- a/megapixels/commands/demo/face_detection.py
+++ b/megapixels/commands/demo/face_detection.py
@@ -39,8 +39,6 @@ def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
import pandas as pd
import cv2 as cv
import dlib
- from PIL import Image
- import matplotlib.pyplot as plt
from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
from app.utils import plot_utils
diff --git a/megapixels/commands/demo/face_emotion.py b/megapixels/commands/demo/face_emotion.py
new file mode 100644
index 00000000..5e06eace
--- /dev/null
+++ b/megapixels/commands/demo/face_emotion.py
@@ -0,0 +1,106 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None, required=True,
+ help='Image filepath')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Image output path')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+ help='GPU index')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+ help='Display detections to debug')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
+ """Face detector demo"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ from tqdm import tqdm
+ import numpy as np
+ import pandas as pd
+ import cv2 as cv
+ import dlib
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.utils import plot_utils
+ from app.processors import face_detector, face_emotion
+ from app.models.data_store import DataStore
+
+
+ log = logger_utils.Logger.getLogger()
+
+
+ # -------------------------------------------------
+ # load image
+
+ im = cv.imread(opt_fp_in)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+
+ # ----------------------------------------------------------------------------
+ # detect face
+
+ face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ bboxes = face_detector.detect(im_resized, largest=True)
+ bbox = bboxes[0]
+ dim = im_resized.shape[:2][::-1]
+ bbox_dim = bbox.to_dim(dim)
+ if not bbox:
+ log.error('no face detected')
+ return
+ else:
+ log.info(f'face detected: {bbox_dim.to_xyxy()}')
+
+
+ # ----------------------------------------------------------------------------
+ # emotion
+
+ emotion_predictor = face_emotion.FaceEmotion(gpu=opt_gpu)
+ emotion_score = emotion_predictor.emotion(im_resized, bbox_dim)
+
+
+ # ----------------------------------------------------------------------------
+ # output
+
+ log.info(f'Face coords: {bbox_dim} face')
+ log.info(f'emotion score: {(100*emotion_score):.2f}')
+
+
+ # ----------------------------------------------------------------------------
+ # draw
+
+ # draw 2d landmarks
+ im_emotion = im_resized.copy()
+ draw_utils.draw_bbox(im_emotion, bbox_dim)
+ txt = f'emotion score: {(100*emotion_score):.2f}'
+ draw_utils.draw_text(im_emotion, bbox_dim.pt_tl, txt)
+
+
+ # ----------------------------------------------------------------------------
+ # save
+
+ if opt_fp_out:
+ # save annotated emotion image
+ cv.imwrite(opt_fp_out, im_emotion)
+
+
+ # ----------------------------------------------------------------------------
+ # display
+
+ if opt_display:
+ # show all images here
+ cv.imshow('emotion', im_emotion)
+ display_utils.handle_keyboard() \ No newline at end of file
diff --git a/megapixels/commands/demo/face_gender.py b/megapixels/commands/demo/face_gender.py
new file mode 100644
index 00000000..8e8c86f3
--- /dev/null
+++ b/megapixels/commands/demo/face_gender.py
@@ -0,0 +1,106 @@
+import click
+
+from app.settings import types
+from app.utils import click_utils
+from app.settings import app_cfg as cfg
+
+
+@click.command()
+@click.option('-i', '--input', 'opt_fp_in', default=None, required=True,
+ help='Image filepath')
+@click.option('-o', '--output', 'opt_fp_out', default=None,
+ help='Image output path')
+@click.option('--size', 'opt_size',
+ type=(int, int), default=(300, 300),
+ help='Output image size')
+@click.option('-g', '--gpu', 'opt_gpu', default=0,
+ help='GPU index')
+@click.option('-f', '--force', 'opt_force', is_flag=True,
+ help='Force overwrite file')
+@click.option('--display/--no-display', 'opt_display', is_flag=True, default=False,
+ help='Display detections to debug')
+@click.pass_context
+def cli(ctx, opt_fp_in, opt_fp_out, opt_gpu, opt_size, opt_force, opt_display):
+ """Face detector demo"""
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ from tqdm import tqdm
+ import numpy as np
+ import pandas as pd
+ import cv2 as cv
+ import dlib
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.utils import plot_utils
+ from app.processors import face_detector, face_gender
+ from app.models.data_store import DataStore
+
+
+ log = logger_utils.Logger.getLogger()
+
+
+ # -------------------------------------------------
+ # load image
+
+ im = cv.imread(opt_fp_in)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+
+ # ----------------------------------------------------------------------------
+ # detect face
+
+ face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ bboxes = face_detector.detect(im_resized, largest=True)
+ bbox = bboxes[0]
+ dim = im_resized.shape[:2][::-1]
+ bbox_dim = bbox.to_dim(dim)
+ if not bbox:
+ log.error('no face detected')
+ return
+ else:
+ log.info(f'face detected: {bbox_dim.to_xyxy()}')
+
+
+ # ----------------------------------------------------------------------------
+ # gender
+
+ gender_predictor = face_gender.FaceGender(gpu=opt_gpu)
+ gender_score = gender_predictor.gender(im_resized, bbox_dim)
+
+
+ # ----------------------------------------------------------------------------
+ # output
+
+ log.info(f'Face coords: {bbox_dim} face')
+ log.info(f'gender score: {(100*gender_score):.2f}')
+
+
+ # ----------------------------------------------------------------------------
+ # draw
+
+ # draw 2d landmarks
+ im_gender = im_resized.copy()
+ draw_utils.draw_bbox(im_gender, bbox_dim)
+ txt = f'gender score: {(100*gender_score):.2f}'
+ draw_utils.draw_text(im_gender, bbox_dim.pt_tl, txt)
+
+
+ # ----------------------------------------------------------------------------
+ # save
+
+ if opt_fp_out:
+ # save annotated gender image
+ cv.imwrite(opt_fp_out, im_gender)
+
+
+ # ----------------------------------------------------------------------------
+ # display
+
+ if opt_display:
+ # show all images here
+ cv.imshow('gender', im_gender)
+ display_utils.handle_keyboard() \ No newline at end of file
diff --git a/megapixels/commands/demo/face_vector.py b/megapixels/commands/demo/face_vector.py
index 1104f923..3ff68001 100644
--- a/megapixels/commands/demo/face_vector.py
+++ b/megapixels/commands/demo/face_vector.py
@@ -68,10 +68,11 @@ def cli(ctx, opt_fp_in, opt_gpu, opt_size, opt_display):
# generate face vectors, only to test if feature extraction works
from app.processors import face_recognition
- face_rec = face_recognition.RecognitionDLIB()
- vec = face_rec.vec(im_resized, bbox_dim)
+ facerec = face_recognition.RecognitionDLIB()
+ vec = facerec.vec(im_resized, bbox_dim)
+ vec_flat = facerec.flatten(vec)
log.info(f'generated vector. showing vec[0:10]:')
- log.info(f'\n{vec[0:10]}')
+ log.info(f'\n{vec_flat}')
if opt_display:
draw_utils.draw_bbox(im_resized, bbox_dim)