From 334ea5a2a91da853dc6faf7f48aaa12599201218 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Fri, 11 Jan 2019 20:38:36 +0100 Subject: enable celery tasks --- megapixels/app/server/tasks/__init__.py | 47 +++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 megapixels/app/server/tasks/__init__.py (limited to 'megapixels/app/server/tasks/__init__.py') diff --git a/megapixels/app/server/tasks/__init__.py b/megapixels/app/server/tasks/__init__.py new file mode 100644 index 00000000..bac7309f --- /dev/null +++ b/megapixels/app/server/tasks/__init__.py @@ -0,0 +1,47 @@ +import simplejson as json +from app.settings import app_cfg as cfg +from celery import Celery + +celery = Celery(__name__, backend=cfg.CELERY_RESULT_BACKEND, broker=cfg.CELERY_BROKER_URL) + +from app.server.tasks.sleep import sleep_task +# from app.server.tasks.blur import blur_task + +def list_active_tasks(): + dropdown = {} + for k,v in task_lookup.items(): + if 'active' not in v or v['active'] is not False: + is_default = 'default' in v and v['default'] is True + task = { + 'name': k, + 'title': v['title'], + 'selected': is_default, + } + dropdown[k] = task + return dropdown + +################################################################### +# Add all valid tasks to this lookup. +# Set 'active': False to disable a task +# Set 'default': True to define the default task + +task_lookup = { + 'sleep': { + 'title': 'Sleep Test', + 'task': sleep_task, + 'active': True, + 'default': True, + }, + # 'blur': { + # 'title': 'Blur', + # 'task': blur_task, + # 'active': False, + # }, + # 'task_dull': { + # 'title': 'DullDream V2', + # 'task': task_dull, + # 'active': True, + # 'default': True, + # } +} + -- cgit v1.2.3-70-g09d2 From 47b6ae0f8ad2f49692222bb0c800e7ba1eb4b94b Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Sun, 13 Jan 2019 00:54:13 +0100 Subject: face info page --- client/faceAnalysis/faceAnalysis.actions.js | 20 +-- client/faceAnalysis/faceAnalysis.query.js | 4 +- client/faceAnalysis/faceAnalysis.reducer.js | 7 + client/index.js | 2 + client/types.js | 2 +- megapixels/app/server/api.py | 1 - megapixels/app/server/api_task.py | 40 +++--- megapixels/app/server/tasks/__init__.py | 19 ++- megapixels/app/server/tasks/blur.py | 23 +--- megapixels/app/server/tasks/fullmonte.py | 199 ++++++++++++++++++++++++++++ megapixels/app/utils/im_utils.py | 12 ++ site/public/info/index.html | 53 ++++++++ 12 files changed, 322 insertions(+), 60 deletions(-) create mode 100644 megapixels/app/server/tasks/fullmonte.py create mode 100644 site/public/info/index.html (limited to 'megapixels/app/server/tasks/__init__.py') diff --git a/client/faceAnalysis/faceAnalysis.actions.js b/client/faceAnalysis/faceAnalysis.actions.js index 90d7156f..6a318b5d 100644 --- a/client/faceAnalysis/faceAnalysis.actions.js +++ b/client/faceAnalysis/faceAnalysis.actions.js @@ -8,7 +8,7 @@ import { get, post } from '../util' // urls const url = { - upload: (dataset) => process.env.API_HOST + '/api/dataset/' + dataset + '/face', + upload: () => process.env.API_HOST + '/task/upload/sleep', } export const publicUrl = { } @@ -26,6 +26,11 @@ const loaded = (tag, data, offset = 0) => ({ data, offset }) +const polled = (data, offset = 0) => ({ + type: types.faceAnalysis.poll, + data, + offset +}) const error = (tag, err) => ({ type: types.faceAnalysis.error, tag, @@ -42,7 +47,7 @@ export const updateOptions = opt => dispatch => { export const upload = (payload, file) => dispatch => { // const { options } = store.getState().faceAnalysis 
- const tag = 'result' + const tag = 'task' const fd = new FormData() fd.append('query_img', file) // fd.append('limit', options.perPage) @@ -62,14 +67,13 @@ const POLL_DELAY = 500 let pollTimeout = null export const poll = (payload, taskURL) => dispatch => { - const tag = 'poll' clearTimeout(pollTimeout) - dispatch(loading(tag)) get(taskURL) .then(data => { - dispatch(loaded(tag, data)) - // check if complete - pollTimeout = setTimeout(() => poll(payload, taskURL), POLL_DELAY) + dispatch(polled(data)) + if (!data.complete) { + pollTimeout = setTimeout(() => poll(payload, taskURL), POLL_DELAY) + } }) - .catch(err => dispatch(error(tag, err))) + .catch(err => dispatch(error('result', err))) } diff --git a/client/faceAnalysis/faceAnalysis.query.js b/client/faceAnalysis/faceAnalysis.query.js index 86dbe1ae..6b92b70d 100644 --- a/client/faceAnalysis/faceAnalysis.query.js +++ b/client/faceAnalysis/faceAnalysis.query.js @@ -57,8 +57,8 @@ class FaceAnalysisQuery extends Component { {'Put yourself under the microscope of various facial recognition algorithms. See what can be determined from a photo.'}

-          Upload a photo of yourself
-          {'Your search data is never stored and immediately cleared '}
+          Upload a photo of yourself and be judged by the algorithm
+          {'Your search data is only stored for the duration of this analysis and is immediately cleared '}
           {'once you leave this page.'}

diff --git a/client/faceAnalysis/faceAnalysis.reducer.js b/client/faceAnalysis/faceAnalysis.reducer.js index d8e914ab..54a6d5eb 100644 --- a/client/faceAnalysis/faceAnalysis.reducer.js +++ b/client/faceAnalysis/faceAnalysis.reducer.js @@ -2,6 +2,7 @@ import * as types from '../types' const initialState = () => ({ query: {}, + task: {}, result: {}, loading: false, }) @@ -20,6 +21,12 @@ export default function faceAnalysisReducer(state = initialState(), action) { [action.tag]: action.data, } + case types.faceAnalysis.poll: + return { + ...state, + result: action.data, + } + case types.faceAnalysis.error: return { ...state, diff --git a/client/index.js b/client/index.js index 40be2841..96f2c8c8 100644 --- a/client/index.js +++ b/client/index.js @@ -20,6 +20,8 @@ function appendReactApplet(el, payload) { } function fetchDataset(payload) { + if (payload.command === 'face_analysis') return new Promise(resolve => resolve()) + if (payload.dataset === 'info') return new Promise(resolve => resolve()) const url = "https://megapixels.nyc3.digitaloceanspaces.com/v1/citations/" + payload.dataset + ".json" return fetch(url, { mode: 'cors' }).then(r => r.json()) } diff --git a/client/types.js b/client/types.js index 2d35ec36..fd9aa3e0 100644 --- a/client/types.js +++ b/client/types.js @@ -7,7 +7,7 @@ export const tagAsType = (type, names) => ( ) export const faceAnalysis = tagAsType('faceAnalysis', [ - 'loading', 'loaded', 'error', 'update_options', + 'loading', 'loaded', 'poll', 'error', 'update_options', ]) export const faceSearch = tagAsType('faceSearch', [ diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py index 48279040..663f52cc 100644 --- a/megapixels/app/server/api.py +++ b/megapixels/app/server/api.py @@ -30,7 +30,6 @@ def index(): """List the datasets and their fields""" return jsonify({ 'datasets': list_datasets() }) - @api.route('/dataset/') def show(dataset_name): """Show the data that a dataset will return""" diff --git a/megapixels/app/server/api_task.py b/megapixels/app/server/api_task.py index 36990997..23e11454 100644 --- a/megapixels/app/server/api_task.py +++ b/megapixels/app/server/api_task.py @@ -6,30 +6,21 @@ import dlib import simplejson as json import numpy as np from flask import Blueprint, request, jsonify -from PIL import Image # todo: try to remove PIL dependency - -# from app.models.sql_factory import load_sql_datasets, list_datasets, get_dataset, get_table -# from app.utils.im_utils import pil2np +from PIL import Image, ImageOps # todo: try to remove PIL dependency from celery.result import AsyncResult from app.server.tasks import celery from app.server.tasks import task_lookup, list_active_tasks +# from app.models.sql_factory import load_sql_datasets, list_datasets, get_dataset, get_table -api_task = Blueprint('api_task', __name__) +api_task = Blueprint('task', __name__) @api_task.route('/') def index(): """List active tasks""" return jsonify(list_active_tasks) -# from flask import render_template, redirect, url_for, send_from_directory -# from flask import request, make_response, jsonify -# from . import main, utils -# from PIL import Image, ImageOps -# import cv2 as cv -# import imutils - -@api_task.route('//') +@api_task.route('/status//') def task_status(task_name, task_id): """Return celery image processing status""" if task_name in task_lookup: @@ -69,6 +60,9 @@ def task_status(task_name, task_id): @api_task.route('/upload/sleep', methods=['GET', 'POST']) def sleep_test(): + """ + Test the Celery system using a task that sleeps. 
+ """ async_task = task_lookup['sleep']['task'].apply_async(args=['sleep_test']) task_url = '/task/{}/{}'.format('sleep', async_task.id) return jsonify({ @@ -76,10 +70,12 @@ def sleep_test(): 'task_url': task_url, }) -@api_task.route('/upload', methods=['POST']) -def upload(): - style = request.form['style'] - print('style',style) +@api_task.route('/upload/:style', methods=['POST']) +def upload(style): + """ + Process a images in a particular style + """ + print('style: {}'.format(style)) if style in task_lookup: task = task_lookup[style]['task'] print('task',task) @@ -103,19 +99,19 @@ def upload(): # convert PNG to JPG print('[+] Resizing image') - # LOL MaskRCNN needs to be run outside of the Celery Task im = Image.open(file.stream).convert('RGB') - im = ImageOps.fit(im,(512,512)) + im = ImageOps.fit(im, (256, 256)) # # Save image to disk # print('[+] Save image to {}'.format(fpath)) # im.save(fpath, 'JPEG', quality=100) # im_pil_256 = im.resize((256,256)) - print('[+] ensure_np...') - im_np = imx.ensure_np(im_pil_256) + # print('[+] ensure_np...') + # im_np = imx.ensure_np(im_pil_256) celery_result = { + im: im, } print('[+] Start celery') @@ -124,6 +120,6 @@ def upload(): return jsonify({ 'result': True, - 'task_url': task_url, + 'taskURL': task_url, 'uuid': uuid_name }) diff --git a/megapixels/app/server/tasks/__init__.py b/megapixels/app/server/tasks/__init__.py index bac7309f..fd6e398a 100644 --- a/megapixels/app/server/tasks/__init__.py +++ b/megapixels/app/server/tasks/__init__.py @@ -5,7 +5,7 @@ from celery import Celery celery = Celery(__name__, backend=cfg.CELERY_RESULT_BACKEND, broker=cfg.CELERY_BROKER_URL) from app.server.tasks.sleep import sleep_task -# from app.server.tasks.blur import blur_task +from app.server.tasks.blur import blur_task def list_active_tasks(): dropdown = {} @@ -32,16 +32,15 @@ task_lookup = { 'active': True, 'default': True, }, - # 'blur': { - # 'title': 'Blur', - # 'task': blur_task, - # 'active': False, - # }, - # 'task_dull': { - # 'title': 'DullDream V2', - # 'task': task_dull, + 'blur': { + 'title': 'Blur', + 'task': blur_task, + 'active': False, + }, + # 'fullmonte': { + # 'title': 'TIA facial processing pipeline', + # 'task': fullmonte, # 'active': True, # 'default': True, # } } - diff --git a/megapixels/app/server/tasks/blur.py b/megapixels/app/server/tasks/blur.py index ede75e6a..3b7e20be 100644 --- a/megapixels/app/server/tasks/blur.py +++ b/megapixels/app/server/tasks/blur.py @@ -3,14 +3,14 @@ import sys import time import datetime import json -from PIL import Image, ImageFilter +from PIL import Image import cv2 as cv import numpy as np -from . import main, utils -from .. 
import basemodels +from app.utils.im_utils import ensure_np, ensure_pil from flask import current_app as app -from .paths import get_paths -celery = basemodels.celery + +from app.server.tasks import celery + from celery.utils.log import get_task_logger celery_logger = get_task_logger(__name__) import imutils @@ -37,9 +37,9 @@ def blur_task(self, uuid_name, extra): 'uuid': uuid_name }) - im_np = utils.ensure_np(im) + im_np = ensure_np(im) im_blur = cv.blur(im_np, (5,5), 1.0) - im_blur_pil = utils.ensure_pil(im_blur) + im_blur_pil = ensure_pil(im_blur) fn = uuid_name + '_blur.jpg' fpath = os.path.join(render_dir, fn) @@ -52,15 +52,6 @@ def blur_task(self, uuid_name, extra): time.sleep(3) - self.update_state( - state = 'PROCESSING', - meta = { - 'percent': 0.50, - 'message': 'Sleeping for some reason', - 'uuid': uuid_name - }) - time.sleep(2) - self.update_state( state = 'PROCESSING', meta = { diff --git a/megapixels/app/server/tasks/fullmonte.py b/megapixels/app/server/tasks/fullmonte.py new file mode 100644 index 00000000..17ca9403 --- /dev/null +++ b/megapixels/app/server/tasks/fullmonte.py @@ -0,0 +1,199 @@ + +import sys +import os +from os.path import join +from pathlib import Path +import time + +import numpy as np +import cv2 as cv +import dlib +from PIL import Image +import matplotlib.pyplot as plt + +from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils +from app.utils import plot_utils +from app.processors import face_detector, face_landmarks +from app.models.data_store import DataStore + +@celery.task(bind=True) +def fullmonte_task(self, uuid_name): + return + + # TOOD add selective testing + opt_run_pose = True + opt_run_2d_68 = True + opt_run_3d_68 = True + opt_run_3d_68 = True + + # ------------------------------------------------- + # init here + + + log = logger_utils.Logger.getLogger() + + # load image + im = cv.imread(opt_fp_in) + im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) + + + # ---------------------------------------------------------------------------- + # detect face + + face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU + log.info('detecting face...') + st = time.time() + bboxes = face_detector.detect(im_resized, largest=True) + bbox = bboxes[0] + dim = im_resized.shape[:2][::-1] + bbox_dim = bbox.to_dim(dim) + if not bbox: + log.error('no face detected') + return + else: + log.info(f'Detected face in {(time.time() - st):.2f}s') + log.info('') + + + # ---------------------------------------------------------------------------- + # detect 3D landmarks + + log.info('loading 3D landmark generator files...') + landmark_detector_3d_68 = face_landmarks.FaceAlignment3D_68(gpu=opt_gpu) # -1 for CPU + log.info('generating 3D landmarks...') + st = time.time() + points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_dim.to_xyxy()) + log.info(f'generated 3D landmarks in {(time.time() - st):.2f}s') + log.info('') + + + # ---------------------------------------------------------------------------- + # generate 3D GIF animation + + log.info('generating 3D animation...') + if not opt_fp_out: + fpp_im = Path(opt_fp_in) + fp_out = join(fpp_im.parent, f'{fpp_im.stem}_anim.gif') + else: + fp_out = opt_fp_out + st = time.time() + plot_utils.generate_3d_landmark_anim(np.array(points_3d_68), fp_out, + size=opt_gif_size, num_frames=opt_gif_frames) + log.info(f'Generated animation in {(time.time() - st):.2f}s') + log.info(f'Saved to: {fp_out}') + log.info('') + + + # 
---------------------------------------------------------------------------- + # generate face vectors, only to test if feature extraction works + + log.info('initialize face recognition model...') + from app.processors import face_recognition + face_rec = face_recognition.RecognitionDLIB() + st = time.time() + log.info('generating face vector...') + vec = face_rec.vec(im_resized, bbox_dim) + log.info(f'generated face vector in {(time.time() - st):.2f}s') + log.info('') + + + # ---------------------------------------------------------------------------- + # generate 68 point landmarks using dlib + + log.info('initializing face landmarks 68 dlib...') + from app.processors import face_landmarks + landmark_detector_2d_68 = face_landmarks.Dlib2D_68() + log.info('generating 2D 68PT landmarks...') + st = time.time() + points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim) + log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s') + log.info('') + + + # ---------------------------------------------------------------------------- + # generate pose from 68 point 2D landmarks + + if opt_run_pose: + log.info('initialize pose...') + from app.processors import face_pose + pose_detector = face_pose.FacePoseDLIB() + log.info('generating pose...') + st = time.time() + pose_data = pose_detector.pose(points_2d_68, dim) + log.info(f'generated pose {(time.time() - st):.2f}s') + log.info('') + + + # ---------------------------------------------------------------------------- + # generate pose from 68 point 2D landmarks + + # done + self.log.debug('Add age real') + self.log.debug('Add age apparent') + self.log.debug('Add gender') + + + # 3DDFA + self.log.debug('Add depth') + self.log.debug('Add pncc') + + # TODO + self.log.debug('Add 3D face model') + self.log.debug('Add face texture flat') + self.log.debug('Add ethnicity') + + # display + if opt_display: + + # draw bbox + + # draw 3d landmarks + im_landmarks_3d_68 = im_resized.copy() + draw_utils.draw_landmarks3D(im_landmarks_3d_68, points_3d_68) + draw_utils.draw_bbox(im_landmarks_3d_68, bbox_dim) + + # draw 2d landmarks + im_landmarks_2d_68 = im_resized.copy() + draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68) + draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim) + + # draw pose + if opt_run_pose: + im_pose = im_resized.copy() + draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points']) + draw_utils.draw_degrees(im_pose, pose_data) + + # draw animated GIF + im = Image.open(fp_out) + im_frames = [] + duration = im.info['duration'] + try: + while True: + im.seek(len(im_frames)) + mypalette = im.getpalette() + im.putpalette(mypalette) + im_jpg = Image.new("RGB", im.size) + im_jpg.paste(im) + im_np = im_utils.pil2np(im_jpg.copy()) + im_frames.append(im_np) + except EOFError: + pass # end of GIF sequence + + n_frames = len(im_frames) + frame_number = 0 + + while True: + # show all images here + cv.imshow('Original', im_resized) + cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68) + cv.imshow('3D 68PT Landmarks', im_landmarks_3d_68) + cv.imshow('Pose', im_pose) + cv.imshow('3D 68pt GIF', im_frames[frame_number]) + frame_number = (frame_number + 1) % n_frames + k = cv.waitKey(duration) & 0xFF + if k == 27 or k == ord('q'): # ESC + cv.destroyAllWindows() + sys.exit() + elif k != 255: + # any key to continue + break \ No newline at end of file diff --git a/megapixels/app/utils/im_utils.py b/megapixels/app/utils/im_utils.py index e882c67f..d36c1c32 100644 --- a/megapixels/app/utils/im_utils.py +++ 
b/megapixels/app/utils/im_utils.py @@ -19,7 +19,19 @@ from torch.autograd import Variable from sklearn.metrics.pairwise import cosine_similarity import datetime +def ensure_pil(im): + """Ensure image is Pillow format""" + try: + im.verify() + return im + except: + return Image.fromarray(im.astype('uint8'), 'RGB') +def ensure_np(im): + """Ensure image is numpy array""" + if type(im) == np.ndarray: + return im + return np.asarray(im, np.uint8) def num_channels(im): '''Returns number of channels in numpy.ndarray image''' diff --git a/site/public/info/index.html b/site/public/info/index.html new file mode 100644 index 00000000..0d7b2d2e --- /dev/null +++ b/site/public/info/index.html @@ -0,0 +1,53 @@ + + + + MegaPixels + + + + + + + + + + + + +

+  MegaPixels
+  The Darkside of Datasets
+  What do facial recognition algorithms see?
+  Results are only stored for the duration of the analysis and are deleted when you leave this page.
+ + + + + \ No newline at end of file -- cgit v1.2.3-70-g09d2 From 198147bef9976a41046c3c513dc4d33babf7a238 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Sun, 13 Jan 2019 22:20:06 +0100 Subject: extracting 3d facial vectors --- client/faceAnalysis/faceAnalysis.actions.js | 2 +- client/faceAnalysis/faceAnalysis.result.js | 26 ++- megapixels/app/server/api.py | 2 - megapixels/app/server/api_task.py | 13 +- megapixels/app/server/tasks/__init__.py | 15 +- megapixels/app/server/tasks/blur.py | 15 +- megapixels/app/server/tasks/demo.py | 244 ++++++++++++++++++++++++++++ megapixels/app/server/tasks/fullmonte.py | 199 ----------------------- site/assets/css/applets.css | 6 +- 9 files changed, 292 insertions(+), 230 deletions(-) create mode 100644 megapixels/app/server/tasks/demo.py delete mode 100644 megapixels/app/server/tasks/fullmonte.py (limited to 'megapixels/app/server/tasks/__init__.py') diff --git a/client/faceAnalysis/faceAnalysis.actions.js b/client/faceAnalysis/faceAnalysis.actions.js index f8d8973f..2d372c1e 100644 --- a/client/faceAnalysis/faceAnalysis.actions.js +++ b/client/faceAnalysis/faceAnalysis.actions.js @@ -8,7 +8,7 @@ import { get, post } from '../util' // urls const url = { - upload: () => process.env.API_HOST + '/task/upload/blur', + upload: () => process.env.API_HOST + '/task/upload/demo', } export const publicUrl = { } diff --git a/client/faceAnalysis/faceAnalysis.result.js b/client/faceAnalysis/faceAnalysis.result.js index 63a23d65..fd079529 100644 --- a/client/faceAnalysis/faceAnalysis.result.js +++ b/client/faceAnalysis/faceAnalysis.result.js @@ -1,7 +1,6 @@ import React, { Component } from 'react' import { bindActionCreators } from 'redux' import { connect } from 'react-redux' -import { courtesyS } from '../util' import * as actions from './faceAnalysis.actions' import { Loader } from '../common' @@ -63,16 +62,25 @@ class FaceAnalysisResult extends Component {
) } - let blurImg = data.data.blur_fn && ( -
- - Blurred image -
- ) + const results = ['blur_fn', 'landmarks_3d_68'].map(tag => { + if (tag in data.data) { + const { title, url } = data.data[tag] + return ( +
+ + {title} +
+ ) + } + return null + }).filter(a => a) + return ( -
+
{!(step && total && message) ? '' : ({step} / {total}: {message})} - {blurImg} +
+ {results} +
Query took {(timing / 1000).toFixed(2)} s.
diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py index 5ad454d8..b3bce9bc 100644 --- a/megapixels/app/server/api.py +++ b/megapixels/app/server/api.py @@ -1,5 +1,3 @@ -import logging -import logging.handlers import os import re import time diff --git a/megapixels/app/server/api_task.py b/megapixels/app/server/api_task.py index c9bc19ed..57ae9f7d 100644 --- a/megapixels/app/server/api_task.py +++ b/megapixels/app/server/api_task.py @@ -27,7 +27,8 @@ def task_status(task_name, task_id): if task_name in task_lookup: task = task_lookup[task_name]['task'].AsyncResult(task_id) # task = AsyncResult(task_id, app=celery) - else: + + if task_name not in task_lookup or task.info is None: return jsonify({ 'state': 'error', 'percent': 100, @@ -75,10 +76,16 @@ def sleep_test(): @api_task.route('/upload/blur', methods=['POST']) def upload(): + return process('blur') + +@api_task.route('/upload/demo', methods=['POST']) +def demo(): + return process('demo') + +def process(style): """ - Process a images in a particular style + Process an image in a particular style """ - style = 'blur' print('style: {}'.format(style)) if style in task_lookup: task = task_lookup[style]['task'] diff --git a/megapixels/app/server/tasks/__init__.py b/megapixels/app/server/tasks/__init__.py index fd6e398a..c0db0be5 100644 --- a/megapixels/app/server/tasks/__init__.py +++ b/megapixels/app/server/tasks/__init__.py @@ -6,6 +6,7 @@ celery = Celery(__name__, backend=cfg.CELERY_RESULT_BACKEND, broker=cfg.CELERY_B from app.server.tasks.sleep import sleep_task from app.server.tasks.blur import blur_task +from app.server.tasks.demo import demo_task def list_active_tasks(): dropdown = {} @@ -35,12 +36,12 @@ task_lookup = { 'blur': { 'title': 'Blur', 'task': blur_task, - 'active': False, + 'active': True, }, - # 'fullmonte': { - # 'title': 'TIA facial processing pipeline', - # 'task': fullmonte, - # 'active': True, - # 'default': True, - # } + 'demo': { + 'title': 'Facial processing pipeline', + 'task': demo_task, + 'active': True, + 'default': True, + } } diff --git a/megapixels/app/server/tasks/blur.py b/megapixels/app/server/tasks/blur.py index 42977097..74798cee 100644 --- a/megapixels/app/server/tasks/blur.py +++ b/megapixels/app/server/tasks/blur.py @@ -14,14 +14,14 @@ import app.settings.app_cfg as cfg from app.server.tasks import celery from celery.utils.log import get_task_logger -celery_logger = get_task_logger(__name__) +log = get_task_logger(__name__) import imutils @celery.task(bind=True) def blur_task(self, uuid_name, fn): """Process image and update during""" - celery_logger.debug('process_image_task, uuid: {}'.format(uuid_name)) - celery_logger.debug('fn: {}'.format(fn)) + log.debug('process_image_task, uuid: {}'.format(uuid_name)) + log.debug('fn: {}'.format(fn)) files = [] @@ -48,7 +48,7 @@ def blur_task(self, uuid_name, fn): fn = uuid_name + '_blur.jpg' fpath = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn) im_blur_pil.save(fpath, 'JPEG', quality=80) - celery_logger.debug('fpath: {}'.format(fpath)) + log.debug('fpath: {}'.format(fpath)) print('fpath: {}'.format(fpath)) # files.append({ @@ -58,7 +58,10 @@ def blur_task(self, uuid_name, fn): meta['step'] += 1 meta['message'] = 'Applying blur' - meta['data']['blur_fn'] = os.path.join('/user_content/', fn) + meta['data']['blur_fn'] = { + 'title': 'Blurred image', + 'url': os.path.join('/user_content/', fn) + } self.update_state(state='PROCESSING', meta=meta) time.sleep(3) @@ -70,7 +73,7 @@ def blur_task(self, uuid_name, fn): 
self.update_state(state='PROCESSING', meta=meta) time.sleep(2) - celery_logger.debug('done!!') + log.debug('done!!') meta['step'] = meta['total'] meta['state'] = 'complete' diff --git a/megapixels/app/server/tasks/demo.py b/megapixels/app/server/tasks/demo.py new file mode 100644 index 00000000..acc5dbac --- /dev/null +++ b/megapixels/app/server/tasks/demo.py @@ -0,0 +1,244 @@ + +import app.settings.app_cfg as cfg +from app.server.tasks import celery + +from celery.utils.log import get_task_logger +log = get_task_logger(__name__) + +opt_size = (256, 256,) + +@celery.task(bind=True) +def demo_task(self, uuid_name, fn): + + import sys + import os + from os.path import join + from pathlib import Path + import time + + import numpy as np + import cv2 as cv + import dlib + from PIL import Image + import matplotlib.pyplot as plt + + from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils + from app.utils import plot_utils + from app.processors import face_detector, face_landmarks + from app.models.data_store import DataStore + + # TODO add selective testing + opt_gpu = -1 + opt_run_pose = True + opt_run_2d_68 = True + opt_run_3d_68 = True + opt_run_3d_68 = True + paths + + meta = { + 'step': 0, + 'total': 3, + 'message': 'Starting', + 'uuid': uuid_name, + 'data': {}, + } + paths = [] + + def step(msg, step=0): + meta['step'] += step + meta['message'] = msg + log.debug('> {}'.format(msg)) + self.update_state(state='PROCESSING', meta=meta) + + step('Loading image') + self.update_state(state='PROCESSING', meta=meta) + + # os.path.join('/user_content/', fn) + + # ------------------------------------------------- + # init here + + # load image + im = cv.imread(fn) + im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) + + # ---------------------------------------------------------------------------- + # detect face + + face_detector_instance = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU + step('Detecting face') + st = time.time() + bboxes = face_detector_instance.detect(im_resized, largest=True) + bbox = bboxes[0] + dim = im_resized.shape[:2][::-1] + bbox_dim = bbox.to_dim(dim) + if not bbox: + log.error('No face detected') + meta['error'] = 'No face detected' + self.update_state(state='FAILURE', meta=meta) + return meta + else: + log.info(f'Detected face in {(time.time() - st):.2f}s') + + + # ---------------------------------------------------------------------------- + # detect 3D landmarks + + step('Generating 3D Landmarks') + log.info('loading 3D landmark generator files...') + landmark_detector_3d_68 = face_landmarks.FaceAlignment3D_68(gpu=opt_gpu) # -1 for CPU + log.info('generating 3D landmarks...') + st = time.time() + points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_dim.to_xyxy()) + log.info(f'generated 3D landmarks in {(time.time() - st):.2f}s') + log.info('') + + # draw 3d landmarks + im_landmarks_3d_68 = im_resized.copy() + draw_utils.draw_landmarks3D(im_landmarks_3d_68, points_3d_68) + draw_utils.draw_bbox(im_landmarks_3d_68, bbox_dim) + + save_image('landmarks_3d_68', '3D Landmarks', im_landmarks_3d_68) + + def save_image(key, title, data): + fn = '{}_{}.jpg'.format(uuid_name, key) + fpath = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn) + paths.append(fpath) + cv.imwrite(fpath, im_landmarks_3d_68) + + meta['data']['landmarks_3d_68'] = { + 'title': '3D Landmarks', + 'url': os.path.join('/user_content/', fn), + } + step('Generated 3D Landmarks', step=0) + + # 
---------------------------------------------------------------------------- + # generate 3D GIF animation + + # step('Generating GIF Animation') + # log.info('generating 3D animation...') + # if not opt_fp_out: + # fpp_im = Path(opt_fp_in) + # fp_out = join(fpp_im.parent, f'{fpp_im.stem}_anim.gif') + # else: + # fp_out = opt_fp_out + # st = time.time() + # plot_utils.generate_3d_landmark_anim(np.array(points_3d_68), fp_out, + # size=opt_gif_size, num_frames=opt_gif_frames) + # log.info(f'Generated animation in {(time.time() - st):.2f}s') + # log.info(f'Saved to: {fp_out}') + # log.info('') + + + # # ---------------------------------------------------------------------------- + # # generate face vectors, only to test if feature extraction works + + # step('Generating face vectors') + # log.info('initialize face recognition model...') + # from app.processors import face_recognition + # face_rec = face_recognition.RecognitionDLIB() + # st = time.time() + # log.info('generating face vector...') + # vec = face_rec.vec(im_resized, bbox_dim) + # log.info(f'generated face vector in {(time.time() - st):.2f}s') + # log.info('') + + + # # ---------------------------------------------------------------------------- + # # generate 68 point landmarks using dlib + + # step('Generating 2D 68PT landmarks') + # log.info('initializing face landmarks 68 dlib...') + # from app.processors import face_landmarks + # landmark_detector_2d_68 = face_landmarks.Dlib2D_68() + # log.info('generating 2D 68PT landmarks...') + # st = time.time() + # points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim) + # log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s') + # log.info('') + + + # # ---------------------------------------------------------------------------- + # # generate pose from 68 point 2D landmarks + + # if opt_run_pose: + # step('Generating pose') + # log.info('initialize pose...') + # from app.processors import face_pose + # pose_detector = face_pose.FacePoseDLIB() + # log.info('generating pose...') + # st = time.time() + # pose_data = pose_detector.pose(points_2d_68, dim) + # log.info(f'generated pose {(time.time() - st):.2f}s') + # log.info('') + + + # # ---------------------------------------------------------------------------- + # # generate pose from 68 point 2D landmarks + + step('Done') + + # done + # self.log.debug('Add age real') + # self.log.debug('Add age apparent') + # self.log.debug('Add gender') + + + # # 3DDFA + # self.log.debug('Add depth') + # self.log.debug('Add pncc') + + # # TODO + # self.log.debug('Add 3D face model') + # self.log.debug('Add face texture flat') + # self.log.debug('Add ethnicity') + + # display + # draw bbox + + # # draw 2d landmarks + # im_landmarks_2d_68 = im_resized.copy() + # draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68) + # draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim) + + # # draw pose + # if opt_run_pose: + # im_pose = im_resized.copy() + # draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points']) + # draw_utils.draw_degrees(im_pose, pose_data) + + # # draw animated GIF + # im = Image.open(fp_out) + # im_frames = [] + # duration = im.info['duration'] + # try: + # while True: + # im.seek(len(im_frames)) + # mypalette = im.getpalette() + # im.putpalette(mypalette) + # im_jpg = Image.new("RGB", im.size) + # im_jpg.paste(im) + # im_np = im_utils.pil2np(im_jpg.copy()) + # im_frames.append(im_np) + # except EOFError: + # pass # end of GIF sequence + + # n_frames = len(im_frames) + # frame_number = 
0 + + # # show all images here + # cv.imshow('Original', im_resized) + # cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68) + # cv.imshow('3D 68PT Landmarks', im_landmarks_3d_68) + # cv.imshow('Pose', im_pose) + # cv.imshow('3D 68pt GIF', im_frames[frame_number]) + + log.debug('done!!') + + for path in paths: + if os.path.exists(path): + os.remove(path) + + meta['step'] = meta['total'] + meta['state'] = 'SUCCESS' + return meta diff --git a/megapixels/app/server/tasks/fullmonte.py b/megapixels/app/server/tasks/fullmonte.py deleted file mode 100644 index 8215656a..00000000 --- a/megapixels/app/server/tasks/fullmonte.py +++ /dev/null @@ -1,199 +0,0 @@ - -import sys -import os -from os.path import join -from pathlib import Path -import time - -import numpy as np -import cv2 as cv -import dlib -from PIL import Image -import matplotlib.pyplot as plt - -from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils -from app.utils import plot_utils -from app.processors import face_detector, face_landmarks -from app.models.data_store import DataStore - -@celery.task(bind=True) -def fullmonte_task(self, uuid_name, fn): - # TOOD add selective testing - opt_run_pose = True - opt_run_2d_68 = True - opt_run_3d_68 = True - opt_run_3d_68 = True - - return - - # ------------------------------------------------- - # init here - - - log = logger_utils.Logger.getLogger() - - # load image - im = cv.imread(opt_fp_in) - im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) - - - # ---------------------------------------------------------------------------- - # detect face - - face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU - log.info('detecting face...') - st = time.time() - bboxes = face_detector.detect(im_resized, largest=True) - bbox = bboxes[0] - dim = im_resized.shape[:2][::-1] - bbox_dim = bbox.to_dim(dim) - if not bbox: - log.error('no face detected') - return - else: - log.info(f'Detected face in {(time.time() - st):.2f}s') - log.info('') - - - # ---------------------------------------------------------------------------- - # detect 3D landmarks - - log.info('loading 3D landmark generator files...') - landmark_detector_3d_68 = face_landmarks.FaceAlignment3D_68(gpu=opt_gpu) # -1 for CPU - log.info('generating 3D landmarks...') - st = time.time() - points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_dim.to_xyxy()) - log.info(f'generated 3D landmarks in {(time.time() - st):.2f}s') - log.info('') - - - # ---------------------------------------------------------------------------- - # generate 3D GIF animation - - log.info('generating 3D animation...') - if not opt_fp_out: - fpp_im = Path(opt_fp_in) - fp_out = join(fpp_im.parent, f'{fpp_im.stem}_anim.gif') - else: - fp_out = opt_fp_out - st = time.time() - plot_utils.generate_3d_landmark_anim(np.array(points_3d_68), fp_out, - size=opt_gif_size, num_frames=opt_gif_frames) - log.info(f'Generated animation in {(time.time() - st):.2f}s') - log.info(f'Saved to: {fp_out}') - log.info('') - - - # ---------------------------------------------------------------------------- - # generate face vectors, only to test if feature extraction works - - log.info('initialize face recognition model...') - from app.processors import face_recognition - face_rec = face_recognition.RecognitionDLIB() - st = time.time() - log.info('generating face vector...') - vec = face_rec.vec(im_resized, bbox_dim) - log.info(f'generated face vector in {(time.time() - st):.2f}s') - log.info('') - - - # 
---------------------------------------------------------------------------- - # generate 68 point landmarks using dlib - - log.info('initializing face landmarks 68 dlib...') - from app.processors import face_landmarks - landmark_detector_2d_68 = face_landmarks.Dlib2D_68() - log.info('generating 2D 68PT landmarks...') - st = time.time() - points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim) - log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s') - log.info('') - - - # ---------------------------------------------------------------------------- - # generate pose from 68 point 2D landmarks - - if opt_run_pose: - log.info('initialize pose...') - from app.processors import face_pose - pose_detector = face_pose.FacePoseDLIB() - log.info('generating pose...') - st = time.time() - pose_data = pose_detector.pose(points_2d_68, dim) - log.info(f'generated pose {(time.time() - st):.2f}s') - log.info('') - - - # ---------------------------------------------------------------------------- - # generate pose from 68 point 2D landmarks - - # done - self.log.debug('Add age real') - self.log.debug('Add age apparent') - self.log.debug('Add gender') - - - # 3DDFA - self.log.debug('Add depth') - self.log.debug('Add pncc') - - # TODO - self.log.debug('Add 3D face model') - self.log.debug('Add face texture flat') - self.log.debug('Add ethnicity') - - # display - if opt_display: - - # draw bbox - - # draw 3d landmarks - im_landmarks_3d_68 = im_resized.copy() - draw_utils.draw_landmarks3D(im_landmarks_3d_68, points_3d_68) - draw_utils.draw_bbox(im_landmarks_3d_68, bbox_dim) - - # draw 2d landmarks - im_landmarks_2d_68 = im_resized.copy() - draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68) - draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim) - - # draw pose - if opt_run_pose: - im_pose = im_resized.copy() - draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points']) - draw_utils.draw_degrees(im_pose, pose_data) - - # draw animated GIF - im = Image.open(fp_out) - im_frames = [] - duration = im.info['duration'] - try: - while True: - im.seek(len(im_frames)) - mypalette = im.getpalette() - im.putpalette(mypalette) - im_jpg = Image.new("RGB", im.size) - im_jpg.paste(im) - im_np = im_utils.pil2np(im_jpg.copy()) - im_frames.append(im_np) - except EOFError: - pass # end of GIF sequence - - n_frames = len(im_frames) - frame_number = 0 - - while True: - # show all images here - cv.imshow('Original', im_resized) - cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68) - cv.imshow('3D 68PT Landmarks', im_landmarks_3d_68) - cv.imshow('Pose', im_pose) - cv.imshow('3D 68pt GIF', im_frames[frame_number]) - frame_number = (frame_number + 1) % n_frames - k = cv.waitKey(duration) & 0xFF - if k == 27 or k == ord('q'): # ESC - cv.destroyAllWindows() - sys.exit() - elif k != 255: - # any key to continue - break \ No newline at end of file diff --git a/site/assets/css/applets.css b/site/assets/css/applets.css index e5b73562..0c566a9f 100644 --- a/site/assets/css/applets.css +++ b/site/assets/css/applets.css @@ -126,12 +126,12 @@ /* analysis results */ -.analysisContainer .result div { +.analysisContainer .results div { width: 256px; text-align: center; - border: 1px solid white; padding: 10px; + margin: 10px; } -.analysisContainer .result div img { +.analysisContainer .results div img { max-width: 100%; } -- cgit v1.2.3-70-g09d2
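
Taken together, these patches settle on one pattern: a Celery task publishes incremental progress through update_state with a meta dict (step, total, message, uuid, data), and a Flask route exposes that state for the client's poll() loop. The sketch below distills that pattern; the Redis broker/backend URLs, the progress_task name, and the exact response fields are assumptions for illustration, not the project's actual configuration.

    # sketch_progress.py -- minimal sketch of the progress pattern (assumed names and URLs)
    import time

    from celery import Celery
    from flask import Flask, jsonify

    # Assumed Redis URLs; the real values come from app.settings.app_cfg
    celery = Celery(__name__,
                    broker='redis://localhost:6379/0',
                    backend='redis://localhost:6379/0')
    app = Flask(__name__)


    @celery.task(bind=True)
    def progress_task(self, uuid_name):
        """Hypothetical task that reports progress the way blur_task/demo_task do."""
        meta = {'step': 0, 'total': 3, 'message': 'Starting',
                'uuid': uuid_name, 'data': {}}
        for message in ('Detecting face', 'Generating landmarks', 'Rendering output'):
            meta['step'] += 1
            meta['message'] = message
            # Push intermediate state so the polling endpoint can report progress
            self.update_state(state='PROCESSING', meta=meta)
            time.sleep(1)  # stand-in for real work
        return meta


    @app.route('/task/status/progress/<task_id>')
    def task_status(task_id):
        """Report task progress in roughly the shape the client-side poll() expects."""
        task = progress_task.AsyncResult(task_id)
        info = task.info if isinstance(task.info, dict) else {}
        return jsonify({
            'state': task.state,
            'step': info.get('step', 0),
            'total': info.get('total', 0),
            'message': info.get('message', ''),
            'data': info.get('data', {}),
            'complete': task.state in ('SUCCESS', 'FAILURE'),
        })

With state exposed this way, the client only has to re-request the status URL until complete is true, which is the loop faceAnalysis.actions.js adopts in the second patch.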