From 198147bef9976a41046c3c513dc4d33babf7a238 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Sun, 13 Jan 2019 22:20:06 +0100 Subject: extracting 3d facial vectors --- megapixels/app/server/tasks/demo.py | 244 ++++++++++++++++++++++++++++++++++++ 1 file changed, 244 insertions(+) create mode 100644 megapixels/app/server/tasks/demo.py (limited to 'megapixels/app/server/tasks/demo.py') diff --git a/megapixels/app/server/tasks/demo.py b/megapixels/app/server/tasks/demo.py new file mode 100644 index 00000000..acc5dbac --- /dev/null +++ b/megapixels/app/server/tasks/demo.py @@ -0,0 +1,244 @@ + +import app.settings.app_cfg as cfg +from app.server.tasks import celery + +from celery.utils.log import get_task_logger +log = get_task_logger(__name__) + +opt_size = (256, 256,) + +@celery.task(bind=True) +def demo_task(self, uuid_name, fn): + + import sys + import os + from os.path import join + from pathlib import Path + import time + + import numpy as np + import cv2 as cv + import dlib + from PIL import Image + import matplotlib.pyplot as plt + + from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils + from app.utils import plot_utils + from app.processors import face_detector, face_landmarks + from app.models.data_store import DataStore + + # TODO add selective testing + opt_gpu = -1 + opt_run_pose = True + opt_run_2d_68 = True + opt_run_3d_68 = True + opt_run_3d_68 = True + paths + + meta = { + 'step': 0, + 'total': 3, + 'message': 'Starting', + 'uuid': uuid_name, + 'data': {}, + } + paths = [] + + def step(msg, step=0): + meta['step'] += step + meta['message'] = msg + log.debug('> {}'.format(msg)) + self.update_state(state='PROCESSING', meta=meta) + + step('Loading image') + self.update_state(state='PROCESSING', meta=meta) + + # os.path.join('/user_content/', fn) + + # ------------------------------------------------- + # init here + + # load image + im = cv.imread(fn) + im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) + + # ---------------------------------------------------------------------------- + # detect face + + face_detector_instance = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU + step('Detecting face') + st = time.time() + bboxes = face_detector_instance.detect(im_resized, largest=True) + bbox = bboxes[0] + dim = im_resized.shape[:2][::-1] + bbox_dim = bbox.to_dim(dim) + if not bbox: + log.error('No face detected') + meta['error'] = 'No face detected' + self.update_state(state='FAILURE', meta=meta) + return meta + else: + log.info(f'Detected face in {(time.time() - st):.2f}s') + + + # ---------------------------------------------------------------------------- + # detect 3D landmarks + + step('Generating 3D Landmarks') + log.info('loading 3D landmark generator files...') + landmark_detector_3d_68 = face_landmarks.FaceAlignment3D_68(gpu=opt_gpu) # -1 for CPU + log.info('generating 3D landmarks...') + st = time.time() + points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_dim.to_xyxy()) + log.info(f'generated 3D landmarks in {(time.time() - st):.2f}s') + log.info('') + + # draw 3d landmarks + im_landmarks_3d_68 = im_resized.copy() + draw_utils.draw_landmarks3D(im_landmarks_3d_68, points_3d_68) + draw_utils.draw_bbox(im_landmarks_3d_68, bbox_dim) + + save_image('landmarks_3d_68', '3D Landmarks', im_landmarks_3d_68) + + def save_image(key, title, data): + fn = '{}_{}.jpg'.format(uuid_name, key) + fpath = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn) + paths.append(fpath) + cv.imwrite(fpath, im_landmarks_3d_68) + + 
meta['data']['landmarks_3d_68'] = { + 'title': '3D Landmarks', + 'url': os.path.join('/user_content/', fn), + } + step('Generated 3D Landmarks', step=0) + + # ---------------------------------------------------------------------------- + # generate 3D GIF animation + + # step('Generating GIF Animation') + # log.info('generating 3D animation...') + # if not opt_fp_out: + # fpp_im = Path(opt_fp_in) + # fp_out = join(fpp_im.parent, f'{fpp_im.stem}_anim.gif') + # else: + # fp_out = opt_fp_out + # st = time.time() + # plot_utils.generate_3d_landmark_anim(np.array(points_3d_68), fp_out, + # size=opt_gif_size, num_frames=opt_gif_frames) + # log.info(f'Generated animation in {(time.time() - st):.2f}s') + # log.info(f'Saved to: {fp_out}') + # log.info('') + + + # # ---------------------------------------------------------------------------- + # # generate face vectors, only to test if feature extraction works + + # step('Generating face vectors') + # log.info('initialize face recognition model...') + # from app.processors import face_recognition + # face_rec = face_recognition.RecognitionDLIB() + # st = time.time() + # log.info('generating face vector...') + # vec = face_rec.vec(im_resized, bbox_dim) + # log.info(f'generated face vector in {(time.time() - st):.2f}s') + # log.info('') + + + # # ---------------------------------------------------------------------------- + # # generate 68 point landmarks using dlib + + # step('Generating 2D 68PT landmarks') + # log.info('initializing face landmarks 68 dlib...') + # from app.processors import face_landmarks + # landmark_detector_2d_68 = face_landmarks.Dlib2D_68() + # log.info('generating 2D 68PT landmarks...') + # st = time.time() + # points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim) + # log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s') + # log.info('') + + + # # ---------------------------------------------------------------------------- + # # generate pose from 68 point 2D landmarks + + # if opt_run_pose: + # step('Generating pose') + # log.info('initialize pose...') + # from app.processors import face_pose + # pose_detector = face_pose.FacePoseDLIB() + # log.info('generating pose...') + # st = time.time() + # pose_data = pose_detector.pose(points_2d_68, dim) + # log.info(f'generated pose {(time.time() - st):.2f}s') + # log.info('') + + + # # ---------------------------------------------------------------------------- + # # generate pose from 68 point 2D landmarks + + step('Done') + + # done + # self.log.debug('Add age real') + # self.log.debug('Add age apparent') + # self.log.debug('Add gender') + + + # # 3DDFA + # self.log.debug('Add depth') + # self.log.debug('Add pncc') + + # # TODO + # self.log.debug('Add 3D face model') + # self.log.debug('Add face texture flat') + # self.log.debug('Add ethnicity') + + # display + # draw bbox + + # # draw 2d landmarks + # im_landmarks_2d_68 = im_resized.copy() + # draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68) + # draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim) + + # # draw pose + # if opt_run_pose: + # im_pose = im_resized.copy() + # draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points']) + # draw_utils.draw_degrees(im_pose, pose_data) + + # # draw animated GIF + # im = Image.open(fp_out) + # im_frames = [] + # duration = im.info['duration'] + # try: + # while True: + # im.seek(len(im_frames)) + # mypalette = im.getpalette() + # im.putpalette(mypalette) + # im_jpg = Image.new("RGB", im.size) + # im_jpg.paste(im) + # im_np = 
im_utils.pil2np(im_jpg.copy()) + # im_frames.append(im_np) + # except EOFError: + # pass # end of GIF sequence + + # n_frames = len(im_frames) + # frame_number = 0 + + # # show all images here + # cv.imshow('Original', im_resized) + # cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68) + # cv.imshow('3D 68PT Landmarks', im_landmarks_3d_68) + # cv.imshow('Pose', im_pose) + # cv.imshow('3D 68pt GIF', im_frames[frame_number]) + + log.debug('done!!') + + for path in paths: + if os.path.exists(path): + os.remove(path) + + meta['step'] = meta['total'] + meta['state'] = 'SUCCESS' + return meta -- cgit v1.2.3-70-g09d2 From 3bd9bb2a7c3106fe69e607458d6fe4577092c1ef Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Sun, 13 Jan 2019 22:42:37 +0100 Subject: displaying animated gif --- client/faceAnalysis/faceAnalysis.result.js | 4 +- megapixels/app/server/tasks/demo.py | 183 ++++++++++++----------------- 2 files changed, 75 insertions(+), 112 deletions(-) (limited to 'megapixels/app/server/tasks/demo.py') diff --git a/client/faceAnalysis/faceAnalysis.result.js b/client/faceAnalysis/faceAnalysis.result.js index fd079529..62ff174c 100644 --- a/client/faceAnalysis/faceAnalysis.result.js +++ b/client/faceAnalysis/faceAnalysis.result.js @@ -62,7 +62,9 @@ class FaceAnalysisResult extends Component {
) } - const results = ['blur_fn', 'landmarks_3d_68'].map(tag => { + + console.log(data.data) + const results = ['blur_fn', 'points_3d_68', 'landmarks_3d_68', 'landmarks_2d_68', 'pose'].map(tag => { if (tag in data.data) { const { title, url } = data.data[tag] return ( diff --git a/megapixels/app/server/tasks/demo.py b/megapixels/app/server/tasks/demo.py index acc5dbac..38a0a3c2 100644 --- a/megapixels/app/server/tasks/demo.py +++ b/megapixels/app/server/tasks/demo.py @@ -33,7 +33,9 @@ def demo_task(self, uuid_name, fn): opt_run_2d_68 = True opt_run_3d_68 = True opt_run_3d_68 = True - paths + + opt_gif_size = (256, 256,) + opt_gif_frames = 15 meta = { 'step': 0, @@ -50,6 +52,17 @@ def demo_task(self, uuid_name, fn): log.debug('> {}'.format(msg)) self.update_state(state='PROCESSING', meta=meta) + def save_image(key, title, data): + fn = '{}_{}.jpg'.format(uuid_name, key) + fpath = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn) + paths.append(fpath) + cv.imwrite(fpath, data) + + meta['data'][key] = { + 'title': title, + 'url': os.path.join('/user_content/', fn), + } + step('Loading image') self.update_state(state='PROCESSING', meta=meta) @@ -100,81 +113,69 @@ def demo_task(self, uuid_name, fn): save_image('landmarks_3d_68', '3D Landmarks', im_landmarks_3d_68) - def save_image(key, title, data): - fn = '{}_{}.jpg'.format(uuid_name, key) - fpath = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn) - paths.append(fpath) - cv.imwrite(fpath, im_landmarks_3d_68) - - meta['data']['landmarks_3d_68'] = { - 'title': '3D Landmarks', - 'url': os.path.join('/user_content/', fn), - } - step('Generated 3D Landmarks', step=0) - # ---------------------------------------------------------------------------- # generate 3D GIF animation - # step('Generating GIF Animation') - # log.info('generating 3D animation...') - # if not opt_fp_out: - # fpp_im = Path(opt_fp_in) - # fp_out = join(fpp_im.parent, f'{fpp_im.stem}_anim.gif') - # else: - # fp_out = opt_fp_out - # st = time.time() - # plot_utils.generate_3d_landmark_anim(np.array(points_3d_68), fp_out, - # size=opt_gif_size, num_frames=opt_gif_frames) - # log.info(f'Generated animation in {(time.time() - st):.2f}s') - # log.info(f'Saved to: {fp_out}') - # log.info('') - - - # # ---------------------------------------------------------------------------- - # # generate face vectors, only to test if feature extraction works - - # step('Generating face vectors') - # log.info('initialize face recognition model...') - # from app.processors import face_recognition - # face_rec = face_recognition.RecognitionDLIB() - # st = time.time() - # log.info('generating face vector...') - # vec = face_rec.vec(im_resized, bbox_dim) - # log.info(f'generated face vector in {(time.time() - st):.2f}s') - # log.info('') - - - # # ---------------------------------------------------------------------------- - # # generate 68 point landmarks using dlib - - # step('Generating 2D 68PT landmarks') - # log.info('initializing face landmarks 68 dlib...') - # from app.processors import face_landmarks - # landmark_detector_2d_68 = face_landmarks.Dlib2D_68() - # log.info('generating 2D 68PT landmarks...') - # st = time.time() - # points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim) - # log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s') - # log.info('') - - - # # ---------------------------------------------------------------------------- - # # generate pose from 68 point 2D landmarks + step('Generating GIF Animation') + log.info('generating 3D animation...') + + fn = 
'{}_{}.gif'.format(uuid_name, '3d') + fp_out = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn) + paths.append(fp_out) + + st = time.time() + plot_utils.generate_3d_landmark_anim(np.array(points_3d_68), fp_out, + size=opt_gif_size, num_frames=opt_gif_frames) + log.info(f'Generated animation in {(time.time() - st):.2f}s') + log.info(f'Saved to: {fp_out}') + log.info('') + + meta['data']['points_3d_68'] = points_3d_68 + meta['data']['points_3d_68'] = { + 'title': '3D Animated GIF', + 'url': os.path.join('/user_content/', fn), + } + + # ---------------------------------------------------------------------------- + # generate 68 point landmarks using dlib - # if opt_run_pose: - # step('Generating pose') - # log.info('initialize pose...') - # from app.processors import face_pose - # pose_detector = face_pose.FacePoseDLIB() - # log.info('generating pose...') - # st = time.time() - # pose_data = pose_detector.pose(points_2d_68, dim) - # log.info(f'generated pose {(time.time() - st):.2f}s') - # log.info('') + step('Generating 2D 68PT landmarks') + log.info('initializing face landmarks 68 dlib...') + from app.processors import face_landmarks + landmark_detector_2d_68 = face_landmarks.Dlib2D_68() + log.info('generating 2D 68PT landmarks...') + st = time.time() + points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim) + log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s') + log.info('') + # draw 2d landmarks + im_landmarks_2d_68 = im_resized.copy() + draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68) + draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim) + save_image('landmarks_2d_68', '2D Landmarks', im_landmarks_2d_68) - # # ---------------------------------------------------------------------------- - # # generate pose from 68 point 2D landmarks + # ---------------------------------------------------------------------------- + # generate pose from 68 point 2D landmarks + + if opt_run_pose: + step('Generating pose') + log.info('initialize pose...') + from app.processors import face_pose + pose_detector = face_pose.FacePoseDLIB() + log.info('generating pose...') + st = time.time() + pose_data = pose_detector.pose(points_2d_68, dim) + log.info(f'generated pose {(time.time() - st):.2f}s') + log.info('') + + im_pose = im_resized.copy() + draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points']) + draw_utils.draw_degrees(im_pose, pose_data) + save_image('pose', 'Pose', im_pose) + + # ---------------------------------------------------------------------------- + # generate pose from 68 point 2D landmarks step('Done') @@ -183,7 +184,6 @@ def demo_task(self, uuid_name, fn): # self.log.debug('Add age apparent') # self.log.debug('Add gender') - # # 3DDFA # self.log.debug('Add depth') # self.log.debug('Add pncc') @@ -193,48 +193,9 @@ def demo_task(self, uuid_name, fn): # self.log.debug('Add face texture flat') # self.log.debug('Add ethnicity') - # display - # draw bbox - - # # draw 2d landmarks - # im_landmarks_2d_68 = im_resized.copy() - # draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68) - # draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim) - - # # draw pose - # if opt_run_pose: - # im_pose = im_resized.copy() - # draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points']) - # draw_utils.draw_degrees(im_pose, pose_data) - - # # draw animated GIF - # im = Image.open(fp_out) - # im_frames = [] - # duration = im.info['duration'] - # try: - # while True: - # im.seek(len(im_frames)) - # mypalette = im.getpalette() - # 
im.putpalette(mypalette) - # im_jpg = Image.new("RGB", im.size) - # im_jpg.paste(im) - # im_np = im_utils.pil2np(im_jpg.copy()) - # im_frames.append(im_np) - # except EOFError: - # pass # end of GIF sequence - - # n_frames = len(im_frames) - # frame_number = 0 - - # # show all images here - # cv.imshow('Original', im_resized) - # cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68) - # cv.imshow('3D 68PT Landmarks', im_landmarks_3d_68) - # cv.imshow('Pose', im_pose) - # cv.imshow('3D 68pt GIF', im_frames[frame_number]) - log.debug('done!!') + time.sleep(3) for path in paths: if os.path.exists(path): os.remove(path) -- cgit v1.2.3-70-g09d2 From e3cfc630f52bce03c3e0e213ca93be6f748a77cb Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Sun, 13 Jan 2019 22:54:09 +0100 Subject: age/gender --- client/faceAnalysis/faceAnalysis.result.js | 7 +++- megapixels/app/server/tasks/demo.py | 61 ++++++++++++++++++++++++++---- 2 files changed, 59 insertions(+), 9 deletions(-) (limited to 'megapixels/app/server/tasks/demo.py') diff --git a/client/faceAnalysis/faceAnalysis.result.js b/client/faceAnalysis/faceAnalysis.result.js index 62ff174c..1c8a2ffb 100644 --- a/client/faceAnalysis/faceAnalysis.result.js +++ b/client/faceAnalysis/faceAnalysis.result.js @@ -64,7 +64,10 @@ class FaceAnalysisResult extends Component { } console.log(data.data) - const results = ['blur_fn', 'points_3d_68', 'landmarks_3d_68', 'landmarks_2d_68', 'pose'].map(tag => { + const results = [ + 'blur_fn', 'points_3d_68', 'landmarks_3d_68', 'landmarks_2d_68', 'pose', + 'age_real', 'age_apparent', 'gender' + ].map(tag => { if (tag in data.data) { const { title, url } = data.data[tag] return ( @@ -79,7 +82,7 @@ class FaceAnalysisResult extends Component { return (
- {!(step && total && message) ? '' : ({step} / {total}: {message})}
+ {!(step && total && message) ? '' : (Step {step} / {total}: {message})}
{results}
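The tags this component looks up in data.data ('blur_fn', 'landmarks_3d_68', 'age_real', and so on) are registered on the server side by the save_image helper introduced in these patches. The following is a minimal, standalone sketch of that pattern for reference; it passes uuid_name, paths, meta, and an output directory explicitly, whereas the patch defines save_image as a closure inside demo_task and writes to cfg.DIR_SITE_USER_CONTENT.

import os
import cv2 as cv

def save_image(key, title, image, uuid_name, paths, meta, out_dir):
    # Write an intermediate result to disk and register a {title, url} entry
    # under meta['data'][key]; the polling client maps these tags to result entries.
    fn = '{}_{}.jpg'.format(uuid_name, key)
    fpath = os.path.join(out_dir, fn)
    paths.append(fpath)  # tracked so the task can delete the temp files when it finishes
    cv.imwrite(fpath, image)
    meta['data'][key] = {
        'title': title,
        'url': os.path.join('/user_content/', fn),
    }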
diff --git a/megapixels/app/server/tasks/demo.py b/megapixels/app/server/tasks/demo.py index 38a0a3c2..5143dd56 100644 --- a/megapixels/app/server/tasks/demo.py +++ b/megapixels/app/server/tasks/demo.py @@ -24,7 +24,7 @@ def demo_task(self, uuid_name, fn): from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils from app.utils import plot_utils - from app.processors import face_detector, face_landmarks + from app.processors import face_detector, face_landmarks, face_age_gender from app.models.data_store import DataStore # TODO add selective testing @@ -175,14 +175,61 @@ def demo_task(self, uuid_name, fn): save_image('pose', 'Pose', im_pose) # ---------------------------------------------------------------------------- - # generate pose from 68 point 2D landmarks + # age - step('Done') + # real + age_real_predictor = face_age_gender.FaceAgeReal() + st = time.time() + age_real = age_real_predictor.predict(im_resized, bbox_dim) + log.info(f'age real took: {(time.time()-st)/1000:.5f}s') + + # apparent + age_apparent_predictor = face_age_gender.FaceAgeApparent() + st = time.time() + age_apparent = age_apparent_predictor.predict(im_resized, bbox_dim) + log.info(f'age apparent took: {(time.time()-st)/1000:.5f}s') + + # gender + gender_predictor = face_age_gender.FaceGender() + st = time.time() + gender = gender_predictor.predict(im_resized, bbox_dim) + log.info(f'gender took: {(time.time()-st)/1000:.5f}s') - # done - # self.log.debug('Add age real') - # self.log.debug('Add age apparent') - # self.log.debug('Add gender') + # ---------------------------------------------------------------------------- + # output + + log.info(f'Face coords: {bbox_dim} face') + log.info(f'Age (real): {(age_real):.2f}') + log.info(f'Age (apparent): {(age_apparent):.2f}') + log.info(f'gender: {gender}') + + + # ---------------------------------------------------------------------------- + # draw + + # draw real age + im_age_real = im_resized.copy() + draw_utils.draw_bbox(im_age_real, bbox_dim) + txt = f'{(age_real):.2f}' + draw_utils.draw_text(im_age_real, bbox_dim.pt_tl, txt) + + # apparent age + im_age_apparent = im_resized.copy() + draw_utils.draw_bbox(im_age_apparent, bbox_dim) + txt = f'{(age_apparent):.2f}' + draw_utils.draw_text(im_age_apparent, bbox_dim.pt_tl, txt) + + # gender + im_gender = im_resized.copy() + draw_utils.draw_bbox(im_age_apparent, bbox_dim) + txt = f"M: {gender['m']:.2f}, F: {gender['f']:.2f}" + draw_utils.draw_text(im_gender, (10, dim[1]-20), txt) + + save_image('age_real', 'Age (Real)', im_age_real) + save_image('age_apparent', 'Age (Apparent)', im_age_apparent) + save_image('gender', 'Gender', im_gender) + + step('Done') # # 3DDFA # self.log.debug('Add depth') -- cgit v1.2.3-70-g09d2 From 13655c42f9dd844a68f8c60a614641cdfa4c4277 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Sun, 13 Jan 2019 23:23:17 +0100 Subject: displayiing all statistics --- client/faceAnalysis/faceAnalysis.actions.js | 2 +- client/faceAnalysis/faceAnalysis.result.js | 29 +++++++++-- megapixels/app/server/tasks/demo.py | 78 ++++++++++++++++------------- 3 files changed, 71 insertions(+), 38 deletions(-) (limited to 'megapixels/app/server/tasks/demo.py') diff --git a/client/faceAnalysis/faceAnalysis.actions.js b/client/faceAnalysis/faceAnalysis.actions.js index 2d372c1e..4a6fe6ed 100644 --- a/client/faceAnalysis/faceAnalysis.actions.js +++ b/client/faceAnalysis/faceAnalysis.actions.js @@ -81,7 +81,7 @@ export const upload = (payload, file) => dispatch => { post(url.upload(), fd) .then(data 
=> { // console.log('loaded!', tag, data) - dispatch(polled(tag, data)) + dispatch(loaded(tag, data)) const { result, taskURL } = data if (result && taskURL) { poll(payload, taskURL)(dispatch) diff --git a/client/faceAnalysis/faceAnalysis.result.js b/client/faceAnalysis/faceAnalysis.result.js index 1c8a2ffb..e7a4c6de 100644 --- a/client/faceAnalysis/faceAnalysis.result.js +++ b/client/faceAnalysis/faceAnalysis.result.js @@ -66,7 +66,6 @@ class FaceAnalysisResult extends Component { console.log(data.data) const results = [ 'blur_fn', 'points_3d_68', 'landmarks_3d_68', 'landmarks_2d_68', 'pose', - 'age_real', 'age_apparent', 'gender' ].map(tag => { if (tag in data.data) { const { title, url } = data.data[tag] @@ -80,14 +79,38 @@ class FaceAnalysisResult extends Component { return null }).filter(a => a) + const statisticsLabels = ['Age (Real)', 'Age (Apparent)', 'Gender', 'Beauty score', 'Emotion'] + const statistics = [ + 'age_real', 'age_apparent', 'gender', 'beauty', 'emotion' + ].map((tag, i) => { + if (tag in data.data.statistics) { + return ( + + + {statisticsLabels[i]} + + + {data.data.statistics[tag]} + + + ) + } + return null + }).filter(a => a) + return (
- {!(step && total && message) ? '' : (Step {step} / {total}: {message})}
{results}
+ {!!statistics.length && (
+ 
+ {statistics}
+ 
+ )}
- Query took {(timing / 1000).toFixed(2)} s.
+ Step {step} / {total} {message}
+ Query {step === total ? 'took' : 'timer:'} {(timing / 1000).toFixed(2)} s.
) diff --git a/megapixels/app/server/tasks/demo.py b/megapixels/app/server/tasks/demo.py index 5143dd56..c27b08b5 100644 --- a/megapixels/app/server/tasks/demo.py +++ b/megapixels/app/server/tasks/demo.py @@ -24,7 +24,8 @@ def demo_task(self, uuid_name, fn): from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils from app.utils import plot_utils - from app.processors import face_detector, face_landmarks, face_age_gender + from app.processors import face_detector, face_landmarks, face_age_gender, face_beauty + # , face_emotion from app.models.data_store import DataStore # TODO add selective testing @@ -39,16 +40,16 @@ def demo_task(self, uuid_name, fn): meta = { 'step': 0, - 'total': 3, + 'total': 10, 'message': 'Starting', 'uuid': uuid_name, - 'data': {}, + 'data': { 'statistics': {} }, } paths = [] - def step(msg, step=0): - meta['step'] += step + def step(msg, step=1): meta['message'] = msg + meta['step'] += step log.debug('> {}'.format(msg)) self.update_state(state='PROCESSING', meta=meta) @@ -178,56 +179,65 @@ def demo_task(self, uuid_name, fn): # age # real + step('Running age predictor') age_real_predictor = face_age_gender.FaceAgeReal() st = time.time() age_real = age_real_predictor.predict(im_resized, bbox_dim) log.info(f'age real took: {(time.time()-st)/1000:.5f}s') + meta['data']['statistics']['age_real'] = f'{(age_real):.2f}' # apparent age_apparent_predictor = face_age_gender.FaceAgeApparent() st = time.time() age_apparent = age_apparent_predictor.predict(im_resized, bbox_dim) log.info(f'age apparent took: {(time.time()-st)/1000:.5f}s') + meta['data']['statistics']['age_apparent'] = f'{(age_apparent):.2f}' # gender + step('Running gender predictor') gender_predictor = face_age_gender.FaceGender() st = time.time() gender = gender_predictor.predict(im_resized, bbox_dim) log.info(f'gender took: {(time.time()-st)/1000:.5f}s') + meta['data']['statistics']['gender'] = f"M: {gender['m']:.2f}, F: {gender['f']:.2f}" - # ---------------------------------------------------------------------------- - # output - - log.info(f'Face coords: {bbox_dim} face') - log.info(f'Age (real): {(age_real):.2f}') - log.info(f'Age (apparent): {(age_apparent):.2f}') - log.info(f'gender: {gender}') - + # # ---------------------------------------------------------------------------- + # # emotion - # ---------------------------------------------------------------------------- - # draw + # emotion_predictor = face_emotion.FaceEmotion(gpu=opt_gpu) + # emotion_score = emotion_predictor.emotion(im_resized, bbox_dim) + # log.info(f'emotion score: {(100*emotion_score):.2f}') - # draw real age - im_age_real = im_resized.copy() - draw_utils.draw_bbox(im_age_real, bbox_dim) - txt = f'{(age_real):.2f}' - draw_utils.draw_text(im_age_real, bbox_dim.pt_tl, txt) + # im_emotion = im_resized.copy() + # draw_utils.draw_bbox(im_emotion, bbox_dim) + # txt = f'emotion score: {(100*emotion_score):.2f}' + # draw_utils.draw_text(im_emotion, bbox_dim.pt_tl, txt) + # save_image('emotion', 'Emotion', im_emotion) - # apparent age - im_age_apparent = im_resized.copy() - draw_utils.draw_bbox(im_age_apparent, bbox_dim) - txt = f'{(age_apparent):.2f}' - draw_utils.draw_text(im_age_apparent, bbox_dim.pt_tl, txt) - # gender - im_gender = im_resized.copy() - draw_utils.draw_bbox(im_age_apparent, bbox_dim) - txt = f"M: {gender['m']:.2f}, F: {gender['f']:.2f}" - draw_utils.draw_text(im_gender, (10, dim[1]-20), txt) - - save_image('age_real', 'Age (Real)', im_age_real) - save_image('age_apparent', 'Age (Apparent)', 
im_age_apparent) - save_image('gender', 'Gender', im_gender) + # ---------------------------------------------------------------------------- + # beauty + + # TODO fix Keras CPU/GPU device selection issue + # NB: GPU visibility issues with dlib/keras + # Wrap this with cuda toggle and run before init dlib GPU + + step('Running beauty predictor') + device_cur = os.getenv('CUDA_VISIBLE_DEVICES', '') + os.environ['CUDA_VISIBLE_DEVICES'] = '' + beauty_predictor = face_beauty.FaceBeauty() + os.environ['CUDA_VISIBLE_DEVICES'] = device_cur + + beauty_score = beauty_predictor.beauty(im_resized, bbox_dim) + log.info(f'beauty score: {(100*beauty_score):.2f}') + + # # draw 2d landmarks + # im_beauty = im_resized.copy() + # draw_utils.draw_bbox(im_beauty, bbox_dim) + # txt = f'Beauty score: {(100*beauty_score):.2f}' + # draw_utils.draw_text(im_beauty, bbox_dim.pt_tl, txt) + # save_image('beauty', 'Beauty', im_beauty) + meta['data']['statistics']['beauty'] = f'{(100*beauty_score):.2f}' step('Done') -- cgit v1.2.3-70-g09d2
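The beauty-predictor block in the final patch works around a Keras/dlib GPU visibility clash by blanking CUDA_VISIBLE_DEVICES around model construction and restoring it afterwards (see the TODO in the diff). One way to package that save/restore is sketched below as a context manager; this is an illustration of the same idea under the patch's assumptions, not code from the repository, and note that the variable only has an effect if it is set before the framework initializes its GPU context.

import os
from contextlib import contextmanager

@contextmanager
def cuda_devices_hidden():
    # Temporarily hide all GPUs so a Keras/TensorFlow model (e.g. the beauty
    # predictor) initializes on CPU, then restore the previous visibility so
    # dlib can keep using the GPU elsewhere in the task.
    previous = os.environ.get('CUDA_VISIBLE_DEVICES')
    os.environ['CUDA_VISIBLE_DEVICES'] = ''
    try:
        yield
    finally:
        if previous is None:
            os.environ.pop('CUDA_VISIBLE_DEVICES', None)
        else:
            os.environ['CUDA_VISIBLE_DEVICES'] = previous

# Hypothetical usage mirroring the patch:
# with cuda_devices_hidden():
#     beauty_predictor = face_beauty.FaceBeauty()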