From 334ea5a2a91da853dc6faf7f48aaa12599201218 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Fri, 11 Jan 2019 20:38:36 +0100 Subject: enable celery tasks --- site/assets/css/applets.css | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'site') diff --git a/site/assets/css/applets.css b/site/assets/css/applets.css index 9c37354a..b2b3c85e 100644 --- a/site/assets/css/applets.css +++ b/site/assets/css/applets.css @@ -59,8 +59,8 @@ } .img .bbox { position: absolute; - color: rgba(255,0,0,1); - background: rgba(255,0,0,0.05); + color: rgba(255,255,255,1); + background: rgba(255,255,255,255.05); border: 1px solid; } .cta { -- cgit v1.2.3-70-g09d2 From 384be7d882d1402220b10bd5b2d0037226b41785 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Sat, 12 Jan 2019 19:02:19 +0100 Subject: returning face results again --- client/faceSearch/faceSearch.result.js | 4 ++-- megapixels/app/models/sql_factory.py | 22 ++++++++++++---------- megapixels/app/processors/faiss.py | 3 +++ megapixels/app/server/api.py | 1 + site/assets/css/applets.css | 2 +- 5 files changed, 19 insertions(+), 13 deletions(-) (limited to 'site') diff --git a/client/faceSearch/faceSearch.result.js b/client/faceSearch/faceSearch.result.js index 95534830..c2509033 100644 --- a/client/faceSearch/faceSearch.result.js +++ b/client/faceSearch/faceSearch.result.js @@ -72,8 +72,8 @@ class FaceSearchResult extends Component { } const els = results.map((result, i) => { const distance = distances[i] - const { uuid } = result.uuid - const { x, y, w, h } = result.roi + const { uuid } = result.file_record + const { x, y, w, h } = result.face_roi const { fullname, gender, description, images } = result.identity const bbox = { left: (100 * x) + '%', diff --git a/megapixels/app/models/sql_factory.py b/megapixels/app/models/sql_factory.py index a71eabb0..5b3cb5a3 100644 --- a/megapixels/app/models/sql_factory.py +++ b/megapixels/app/models/sql_factory.py @@ -96,22 +96,24 @@ class SqlDataset: Get an identity given an ID. 
""" # id += 1 - print('fetching {}'.format(id)) - file_record_table = self.get_table('file_record') file_record = file_record_table.query.filter(file_record_table.id == id).first() + if not file_record: + return None + identity_table = self.get_table('identity') identity = identity_table.query.filter(identity_table.id == file_record.identity_id).first() - if file_record and identity: - return { - 'file_record': file_record.toJSON(), - 'identity': identity.toJSON(), - 'face_roi': self.select('face_roi', id), - 'face_pose': self.select('face_pose', id), - } - return {} + if not identity: + return None + + return { + 'file_record': file_record.toJSON(), + 'identity': identity.toJSON(), + 'face_roi': self.select('face_roi', id), + 'face_pose': self.select('face_pose', id), + } def search_name(self, q): """ diff --git a/megapixels/app/processors/faiss.py b/megapixels/app/processors/faiss.py index 5156ad71..ab067fd0 100644 --- a/megapixels/app/processors/faiss.py +++ b/megapixels/app/processors/faiss.py @@ -30,6 +30,9 @@ def build_faiss_database(name, recipe): vec_fn = os.path.join(cfg.DIR_FAISS_METADATA, name, "vecs.csv") index_fn = os.path.join(cfg.DIR_FAISS_INDEXES, name + ".index") + if not os.path.exists(vec_fn): + return + index = faiss.index_factory(recipe.dim, recipe.factory_type) keys, rows = load_csv_safe(vec_fn) diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py index af3db4d0..5f80a0c4 100644 --- a/megapixels/app/server/api.py +++ b/megapixels/app/server/api.py @@ -114,6 +114,7 @@ def upload(dataset_name): ids.append(_i+1) identities = [ dataset.get_identity(int(_i)) for _i in ids ] + identities = list(filter(None, identities)) # print(distances) # print(ids) diff --git a/site/assets/css/applets.css b/site/assets/css/applets.css index b2b3c85e..b64da4b7 100644 --- a/site/assets/css/applets.css +++ b/site/assets/css/applets.css @@ -60,7 +60,7 @@ .img .bbox { position: absolute; color: rgba(255,255,255,1); - background: rgba(255,255,255,255.05); + background: rgba(255,255,255,0.05); border: 1px solid; } .cta { -- cgit v1.2.3-70-g09d2 From 47b6ae0f8ad2f49692222bb0c800e7ba1eb4b94b Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Sun, 13 Jan 2019 00:54:13 +0100 Subject: face info page --- client/faceAnalysis/faceAnalysis.actions.js | 20 +-- client/faceAnalysis/faceAnalysis.query.js | 4 +- client/faceAnalysis/faceAnalysis.reducer.js | 7 + client/index.js | 2 + client/types.js | 2 +- megapixels/app/server/api.py | 1 - megapixels/app/server/api_task.py | 40 +++--- megapixels/app/server/tasks/__init__.py | 19 ++- megapixels/app/server/tasks/blur.py | 23 +--- megapixels/app/server/tasks/fullmonte.py | 199 ++++++++++++++++++++++++++++ megapixels/app/utils/im_utils.py | 12 ++ site/public/info/index.html | 53 ++++++++ 12 files changed, 322 insertions(+), 60 deletions(-) create mode 100644 megapixels/app/server/tasks/fullmonte.py create mode 100644 site/public/info/index.html (limited to 'site') diff --git a/client/faceAnalysis/faceAnalysis.actions.js b/client/faceAnalysis/faceAnalysis.actions.js index 90d7156f..6a318b5d 100644 --- a/client/faceAnalysis/faceAnalysis.actions.js +++ b/client/faceAnalysis/faceAnalysis.actions.js @@ -8,7 +8,7 @@ import { get, post } from '../util' // urls const url = { - upload: (dataset) => process.env.API_HOST + '/api/dataset/' + dataset + '/face', + upload: () => process.env.API_HOST + '/task/upload/sleep', } export const publicUrl = { } @@ -26,6 +26,11 @@ const loaded = (tag, data, offset = 0) => ({ data, offset }) +const polled = (data, 
offset = 0) => ({ + type: types.faceAnalysis.poll, + data, + offset +}) const error = (tag, err) => ({ type: types.faceAnalysis.error, tag, @@ -42,7 +47,7 @@ export const updateOptions = opt => dispatch => { export const upload = (payload, file) => dispatch => { // const { options } = store.getState().faceAnalysis - const tag = 'result' + const tag = 'task' const fd = new FormData() fd.append('query_img', file) // fd.append('limit', options.perPage) @@ -62,14 +67,13 @@ const POLL_DELAY = 500 let pollTimeout = null export const poll = (payload, taskURL) => dispatch => { - const tag = 'poll' clearTimeout(pollTimeout) - dispatch(loading(tag)) get(taskURL) .then(data => { - dispatch(loaded(tag, data)) - // check if complete - pollTimeout = setTimeout(() => poll(payload, taskURL), POLL_DELAY) + dispatch(polled(data)) + if (!data.complete) { + pollTimeout = setTimeout(() => poll(payload, taskURL), POLL_DELAY) + } }) - .catch(err => dispatch(error(tag, err))) + .catch(err => dispatch(error('result', err))) } diff --git a/client/faceAnalysis/faceAnalysis.query.js b/client/faceAnalysis/faceAnalysis.query.js index 86dbe1ae..6b92b70d 100644 --- a/client/faceAnalysis/faceAnalysis.query.js +++ b/client/faceAnalysis/faceAnalysis.query.js @@ -57,8 +57,8 @@ class FaceAnalysisQuery extends Component { {'Put yourself under the microscope of various facial recognition algorithms. See what can be determined from a photo.'}

-          Upload a photo of yourself
-
-          {'Your search data is never stored and immediately cleared '}
+          Upload a photo of yourself and be judged by the algorithm
+
+          {'Your search data is only stored for the duration of this analysis and is immediately cleared '}
           {'once you leave this page.'}

diff --git a/client/faceAnalysis/faceAnalysis.reducer.js b/client/faceAnalysis/faceAnalysis.reducer.js index d8e914ab..54a6d5eb 100644 --- a/client/faceAnalysis/faceAnalysis.reducer.js +++ b/client/faceAnalysis/faceAnalysis.reducer.js @@ -2,6 +2,7 @@ import * as types from '../types' const initialState = () => ({ query: {}, + task: {}, result: {}, loading: false, }) @@ -20,6 +21,12 @@ export default function faceAnalysisReducer(state = initialState(), action) { [action.tag]: action.data, } + case types.faceAnalysis.poll: + return { + ...state, + result: action.data, + } + case types.faceAnalysis.error: return { ...state, diff --git a/client/index.js b/client/index.js index 40be2841..96f2c8c8 100644 --- a/client/index.js +++ b/client/index.js @@ -20,6 +20,8 @@ function appendReactApplet(el, payload) { } function fetchDataset(payload) { + if (payload.command === 'face_analysis') return new Promise(resolve => resolve()) + if (payload.dataset === 'info') return new Promise(resolve => resolve()) const url = "https://megapixels.nyc3.digitaloceanspaces.com/v1/citations/" + payload.dataset + ".json" return fetch(url, { mode: 'cors' }).then(r => r.json()) } diff --git a/client/types.js b/client/types.js index 2d35ec36..fd9aa3e0 100644 --- a/client/types.js +++ b/client/types.js @@ -7,7 +7,7 @@ export const tagAsType = (type, names) => ( ) export const faceAnalysis = tagAsType('faceAnalysis', [ - 'loading', 'loaded', 'error', 'update_options', + 'loading', 'loaded', 'poll', 'error', 'update_options', ]) export const faceSearch = tagAsType('faceSearch', [ diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py index 48279040..663f52cc 100644 --- a/megapixels/app/server/api.py +++ b/megapixels/app/server/api.py @@ -30,7 +30,6 @@ def index(): """List the datasets and their fields""" return jsonify({ 'datasets': list_datasets() }) - @api.route('/dataset/') def show(dataset_name): """Show the data that a dataset will return""" diff --git a/megapixels/app/server/api_task.py b/megapixels/app/server/api_task.py index 36990997..23e11454 100644 --- a/megapixels/app/server/api_task.py +++ b/megapixels/app/server/api_task.py @@ -6,30 +6,21 @@ import dlib import simplejson as json import numpy as np from flask import Blueprint, request, jsonify -from PIL import Image # todo: try to remove PIL dependency - -# from app.models.sql_factory import load_sql_datasets, list_datasets, get_dataset, get_table -# from app.utils.im_utils import pil2np +from PIL import Image, ImageOps # todo: try to remove PIL dependency from celery.result import AsyncResult from app.server.tasks import celery from app.server.tasks import task_lookup, list_active_tasks +# from app.models.sql_factory import load_sql_datasets, list_datasets, get_dataset, get_table -api_task = Blueprint('api_task', __name__) +api_task = Blueprint('task', __name__) @api_task.route('/') def index(): """List active tasks""" return jsonify(list_active_tasks) -# from flask import render_template, redirect, url_for, send_from_directory -# from flask import request, make_response, jsonify -# from . import main, utils -# from PIL import Image, ImageOps -# import cv2 as cv -# import imutils - -@api_task.route('//') +@api_task.route('/status//') def task_status(task_name, task_id): """Return celery image processing status""" if task_name in task_lookup: @@ -69,6 +60,9 @@ def task_status(task_name, task_id): @api_task.route('/upload/sleep', methods=['GET', 'POST']) def sleep_test(): + """ + Test the Celery system using a task that sleeps. 
+ """ async_task = task_lookup['sleep']['task'].apply_async(args=['sleep_test']) task_url = '/task/{}/{}'.format('sleep', async_task.id) return jsonify({ @@ -76,10 +70,12 @@ def sleep_test(): 'task_url': task_url, }) -@api_task.route('/upload', methods=['POST']) -def upload(): - style = request.form['style'] - print('style',style) +@api_task.route('/upload/:style', methods=['POST']) +def upload(style): + """ + Process a images in a particular style + """ + print('style: {}'.format(style)) if style in task_lookup: task = task_lookup[style]['task'] print('task',task) @@ -103,19 +99,19 @@ def upload(): # convert PNG to JPG print('[+] Resizing image') - # LOL MaskRCNN needs to be run outside of the Celery Task im = Image.open(file.stream).convert('RGB') - im = ImageOps.fit(im,(512,512)) + im = ImageOps.fit(im, (256, 256)) # # Save image to disk # print('[+] Save image to {}'.format(fpath)) # im.save(fpath, 'JPEG', quality=100) # im_pil_256 = im.resize((256,256)) - print('[+] ensure_np...') - im_np = imx.ensure_np(im_pil_256) + # print('[+] ensure_np...') + # im_np = imx.ensure_np(im_pil_256) celery_result = { + im: im, } print('[+] Start celery') @@ -124,6 +120,6 @@ def upload(): return jsonify({ 'result': True, - 'task_url': task_url, + 'taskURL': task_url, 'uuid': uuid_name }) diff --git a/megapixels/app/server/tasks/__init__.py b/megapixels/app/server/tasks/__init__.py index bac7309f..fd6e398a 100644 --- a/megapixels/app/server/tasks/__init__.py +++ b/megapixels/app/server/tasks/__init__.py @@ -5,7 +5,7 @@ from celery import Celery celery = Celery(__name__, backend=cfg.CELERY_RESULT_BACKEND, broker=cfg.CELERY_BROKER_URL) from app.server.tasks.sleep import sleep_task -# from app.server.tasks.blur import blur_task +from app.server.tasks.blur import blur_task def list_active_tasks(): dropdown = {} @@ -32,16 +32,15 @@ task_lookup = { 'active': True, 'default': True, }, - # 'blur': { - # 'title': 'Blur', - # 'task': blur_task, - # 'active': False, - # }, - # 'task_dull': { - # 'title': 'DullDream V2', - # 'task': task_dull, + 'blur': { + 'title': 'Blur', + 'task': blur_task, + 'active': False, + }, + # 'fullmonte': { + # 'title': 'TIA facial processing pipeline', + # 'task': fullmonte, # 'active': True, # 'default': True, # } } - diff --git a/megapixels/app/server/tasks/blur.py b/megapixels/app/server/tasks/blur.py index ede75e6a..3b7e20be 100644 --- a/megapixels/app/server/tasks/blur.py +++ b/megapixels/app/server/tasks/blur.py @@ -3,14 +3,14 @@ import sys import time import datetime import json -from PIL import Image, ImageFilter +from PIL import Image import cv2 as cv import numpy as np -from . import main, utils -from .. 
import basemodels +from app.utils.im_utils import ensure_np, ensure_pil from flask import current_app as app -from .paths import get_paths -celery = basemodels.celery + +from app.server.tasks import celery + from celery.utils.log import get_task_logger celery_logger = get_task_logger(__name__) import imutils @@ -37,9 +37,9 @@ def blur_task(self, uuid_name, extra): 'uuid': uuid_name }) - im_np = utils.ensure_np(im) + im_np = ensure_np(im) im_blur = cv.blur(im_np, (5,5), 1.0) - im_blur_pil = utils.ensure_pil(im_blur) + im_blur_pil = ensure_pil(im_blur) fn = uuid_name + '_blur.jpg' fpath = os.path.join(render_dir, fn) @@ -52,15 +52,6 @@ def blur_task(self, uuid_name, extra): time.sleep(3) - self.update_state( - state = 'PROCESSING', - meta = { - 'percent': 0.50, - 'message': 'Sleeping for some reason', - 'uuid': uuid_name - }) - time.sleep(2) - self.update_state( state = 'PROCESSING', meta = { diff --git a/megapixels/app/server/tasks/fullmonte.py b/megapixels/app/server/tasks/fullmonte.py new file mode 100644 index 00000000..17ca9403 --- /dev/null +++ b/megapixels/app/server/tasks/fullmonte.py @@ -0,0 +1,199 @@ + +import sys +import os +from os.path import join +from pathlib import Path +import time + +import numpy as np +import cv2 as cv +import dlib +from PIL import Image +import matplotlib.pyplot as plt + +from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils +from app.utils import plot_utils +from app.processors import face_detector, face_landmarks +from app.models.data_store import DataStore + +@celery.task(bind=True) +def fullmonte_task(self, uuid_name): + return + + # TOOD add selective testing + opt_run_pose = True + opt_run_2d_68 = True + opt_run_3d_68 = True + opt_run_3d_68 = True + + # ------------------------------------------------- + # init here + + + log = logger_utils.Logger.getLogger() + + # load image + im = cv.imread(opt_fp_in) + im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) + + + # ---------------------------------------------------------------------------- + # detect face + + face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU + log.info('detecting face...') + st = time.time() + bboxes = face_detector.detect(im_resized, largest=True) + bbox = bboxes[0] + dim = im_resized.shape[:2][::-1] + bbox_dim = bbox.to_dim(dim) + if not bbox: + log.error('no face detected') + return + else: + log.info(f'Detected face in {(time.time() - st):.2f}s') + log.info('') + + + # ---------------------------------------------------------------------------- + # detect 3D landmarks + + log.info('loading 3D landmark generator files...') + landmark_detector_3d_68 = face_landmarks.FaceAlignment3D_68(gpu=opt_gpu) # -1 for CPU + log.info('generating 3D landmarks...') + st = time.time() + points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_dim.to_xyxy()) + log.info(f'generated 3D landmarks in {(time.time() - st):.2f}s') + log.info('') + + + # ---------------------------------------------------------------------------- + # generate 3D GIF animation + + log.info('generating 3D animation...') + if not opt_fp_out: + fpp_im = Path(opt_fp_in) + fp_out = join(fpp_im.parent, f'{fpp_im.stem}_anim.gif') + else: + fp_out = opt_fp_out + st = time.time() + plot_utils.generate_3d_landmark_anim(np.array(points_3d_68), fp_out, + size=opt_gif_size, num_frames=opt_gif_frames) + log.info(f'Generated animation in {(time.time() - st):.2f}s') + log.info(f'Saved to: {fp_out}') + log.info('') + + + # 
---------------------------------------------------------------------------- + # generate face vectors, only to test if feature extraction works + + log.info('initialize face recognition model...') + from app.processors import face_recognition + face_rec = face_recognition.RecognitionDLIB() + st = time.time() + log.info('generating face vector...') + vec = face_rec.vec(im_resized, bbox_dim) + log.info(f'generated face vector in {(time.time() - st):.2f}s') + log.info('') + + + # ---------------------------------------------------------------------------- + # generate 68 point landmarks using dlib + + log.info('initializing face landmarks 68 dlib...') + from app.processors import face_landmarks + landmark_detector_2d_68 = face_landmarks.Dlib2D_68() + log.info('generating 2D 68PT landmarks...') + st = time.time() + points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim) + log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s') + log.info('') + + + # ---------------------------------------------------------------------------- + # generate pose from 68 point 2D landmarks + + if opt_run_pose: + log.info('initialize pose...') + from app.processors import face_pose + pose_detector = face_pose.FacePoseDLIB() + log.info('generating pose...') + st = time.time() + pose_data = pose_detector.pose(points_2d_68, dim) + log.info(f'generated pose {(time.time() - st):.2f}s') + log.info('') + + + # ---------------------------------------------------------------------------- + # generate pose from 68 point 2D landmarks + + # done + self.log.debug('Add age real') + self.log.debug('Add age apparent') + self.log.debug('Add gender') + + + # 3DDFA + self.log.debug('Add depth') + self.log.debug('Add pncc') + + # TODO + self.log.debug('Add 3D face model') + self.log.debug('Add face texture flat') + self.log.debug('Add ethnicity') + + # display + if opt_display: + + # draw bbox + + # draw 3d landmarks + im_landmarks_3d_68 = im_resized.copy() + draw_utils.draw_landmarks3D(im_landmarks_3d_68, points_3d_68) + draw_utils.draw_bbox(im_landmarks_3d_68, bbox_dim) + + # draw 2d landmarks + im_landmarks_2d_68 = im_resized.copy() + draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68) + draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim) + + # draw pose + if opt_run_pose: + im_pose = im_resized.copy() + draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points']) + draw_utils.draw_degrees(im_pose, pose_data) + + # draw animated GIF + im = Image.open(fp_out) + im_frames = [] + duration = im.info['duration'] + try: + while True: + im.seek(len(im_frames)) + mypalette = im.getpalette() + im.putpalette(mypalette) + im_jpg = Image.new("RGB", im.size) + im_jpg.paste(im) + im_np = im_utils.pil2np(im_jpg.copy()) + im_frames.append(im_np) + except EOFError: + pass # end of GIF sequence + + n_frames = len(im_frames) + frame_number = 0 + + while True: + # show all images here + cv.imshow('Original', im_resized) + cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68) + cv.imshow('3D 68PT Landmarks', im_landmarks_3d_68) + cv.imshow('Pose', im_pose) + cv.imshow('3D 68pt GIF', im_frames[frame_number]) + frame_number = (frame_number + 1) % n_frames + k = cv.waitKey(duration) & 0xFF + if k == 27 or k == ord('q'): # ESC + cv.destroyAllWindows() + sys.exit() + elif k != 255: + # any key to continue + break \ No newline at end of file diff --git a/megapixels/app/utils/im_utils.py b/megapixels/app/utils/im_utils.py index e882c67f..d36c1c32 100644 --- a/megapixels/app/utils/im_utils.py +++ 
b/megapixels/app/utils/im_utils.py @@ -19,7 +19,19 @@ from torch.autograd import Variable from sklearn.metrics.pairwise import cosine_similarity import datetime +def ensure_pil(im): + """Ensure image is Pillow format""" + try: + im.verify() + return im + except: + return Image.fromarray(im.astype('uint8'), 'RGB') +def ensure_np(im): + """Ensure image is numpy array""" + if type(im) == np.ndarray: + return im + return np.asarray(im, np.uint8) def num_channels(im): '''Returns number of channels in numpy.ndarray image''' diff --git a/site/public/info/index.html b/site/public/info/index.html new file mode 100644 index 00000000..0d7b2d2e --- /dev/null +++ b/site/public/info/index.html @@ -0,0 +1,53 @@ + + + + MegaPixels + + + + + + + + + + + + +

+    MegaPixels
+    The Darkside of Datasets
+
+    What do facial recognition algorithms see?
+
+    Results are only stored for the duration of the analysis and are deleted when you leave this page.
+ + + + + \ No newline at end of file -- cgit v1.2.3-70-g09d2 From b4ed297a6dc73ec5f5cf2772ca1b754ea3f98cae Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Sun, 13 Jan 2019 21:06:51 +0100 Subject: basic blurring applet --- .gitignore | 1 + client/common/upload.helpers.js | 27 +++++------ client/faceAnalysis/faceAnalysis.actions.js | 21 ++++++--- client/faceAnalysis/faceAnalysis.container.js | 2 +- client/faceAnalysis/faceAnalysis.query.js | 12 ++++- client/faceAnalysis/faceAnalysis.reducer.js | 8 ++++ client/faceAnalysis/faceAnalysis.result.js | 26 ++++++++--- megapixels/app/server/api.py | 1 - megapixels/app/server/api_task.py | 10 ++-- megapixels/app/server/tasks/blur.py | 67 ++++++++++++++------------- megapixels/app/server/tasks/fullmonte.py | 6 +-- megapixels/app/server/tasks/sleep.py | 2 +- megapixels/app/settings/app_cfg.py | 1 + site/assets/css/applets.css | 17 ++++++- 14 files changed, 132 insertions(+), 69 deletions(-) (limited to 'site') diff --git a/.gitignore b/.gitignore index 30c69fbe..b800c5b8 100644 --- a/.gitignore +++ b/.gitignore @@ -158,4 +158,5 @@ scraper/reports/papers/ .creds site/assets/js/dist/ +site/public/user_content diff --git a/client/common/upload.helpers.js b/client/common/upload.helpers.js index eb42a993..4b38fb09 100644 --- a/client/common/upload.helpers.js +++ b/client/common/upload.helpers.js @@ -1,6 +1,6 @@ import ExifReader from 'exifreader' -export const MAX_SIDE = 300 +export const MAX_SIDE = 256 function base64ToUint8Array(string, start, finish) { start = start || 0 @@ -110,16 +110,17 @@ export function renderToCanvas(img, options) { options = options || {} // Canvas max size for any side - const maxSize = MAX_SIDE + const maxSide = MAX_SIDE const canvas = document.createElement('canvas') const ctx = canvas.getContext('2d') const initialScale = options.scale || 1 // Scale to needed to constrain canvas to max size - let scale = getScale(img.width * initialScale, img.height * initialScale, maxSize, maxSize, true) + let scale = getScale(img.naturalWidth * initialScale, img.naturalHeight * initialScale, maxSide, maxSide, true) + console.log(scale) // Still need to apply the user defined scale scale *= initialScale - canvas.width = Math.round(img.width * scale) - canvas.height = Math.round(img.height * scale) + canvas.width = Math.round(img.naturalWidth * scale) + canvas.height = Math.round(img.naturalHeight * scale) const { correctOrientation } = options const jpeg = !!img.src.match(/data:image\/jpeg|\.jpeg$|\.jpg$/i) const hasDataURI = !!img.src.match(/^data:/) @@ -144,12 +145,12 @@ export function renderToCanvas(img, options) { export function renderThumbnail(img) { const resized = renderToCanvas(img, { correctOrientation: true }) - const canvas = document.createElement('canvas') // document.querySelector('#user_photo_canvas') - const ctx = canvas.getContext('2d') - ctx.fillStyle = 'black' - ctx.fillRect(0, 0, MAX_SIDE, MAX_SIDE) - const xOffset = (MAX_SIDE - resized.width) / 2 - const yOffset = (MAX_SIDE - resized.height) / 2 - ctx.drawImage(resized, xOffset, yOffset) - return canvas + // const canvas = document.createElement('canvas') // document.querySelector('#user_photo_canvas') + // const ctx = canvas.getContext('2d') + // ctx.fillStyle = 'black' + // ctx.fillRect(0, 0, MAX_SIDE, MAX_SIDE) + // const xOffset = (MAX_SIDE - resized.width) / 2 + // const yOffset = (MAX_SIDE - resized.height) / 2 + // ctx.drawImage(resized, xOffset, yOffset, resized.width, resized.height) + return resized } diff --git 
a/client/faceAnalysis/faceAnalysis.actions.js b/client/faceAnalysis/faceAnalysis.actions.js index 860d3292..f8d8973f 100644 --- a/client/faceAnalysis/faceAnalysis.actions.js +++ b/client/faceAnalysis/faceAnalysis.actions.js @@ -16,17 +16,20 @@ export const publicUrl = { // standard loading events const loading = (tag, offset) => ({ + ts: Date.now(), type: types.faceAnalysis.loading, tag, offset }) const loaded = (tag, data, offset = 0) => ({ + ts: Date.now(), type: types.faceAnalysis.loaded, tag, data, offset }) const polled = (data, offset = 0) => ({ + ts: Date.now(), type: types.faceAnalysis.poll, data, offset @@ -52,13 +55,19 @@ let pollTimeout = null export const poll = (payload, taskURL) => dispatch => { clearTimeout(pollTimeout) - console.log('polling...') + // console.log('polling...') get(taskURL) .then(data => { - console.log('poll', data) + // console.log('poll', data) dispatch(polled(data)) - if (data.state !== 'error' && data.state !== 'complete') { - pollTimeout = setTimeout(() => poll(payload, taskURL), POLL_DELAY) + // console.log(data.state) + if (data.state === 'COMPLETE' || data.state === 'SUCCESS') { + console.log('complete!') + } else if (data.state === 'ERROR' || data.state === 'FAILURE') { + console.log('errorr!') + dispatch(error(data)) + } else { + pollTimeout = setTimeout(() => poll(payload, taskURL)(dispatch), POLL_DELAY) } }) .catch(err => dispatch(error('result', err))) @@ -71,8 +80,8 @@ export const upload = (payload, file) => dispatch => { dispatch(loading(tag)) post(url.upload(), fd) .then(data => { - console.log('loaded!', tag, data) - dispatch(loaded(tag, data)) + // console.log('loaded!', tag, data) + dispatch(polled(tag, data)) const { result, taskURL } = data if (result && taskURL) { poll(payload, taskURL)(dispatch) diff --git a/client/faceAnalysis/faceAnalysis.container.js b/client/faceAnalysis/faceAnalysis.container.js index a86bcaa4..24848455 100644 --- a/client/faceAnalysis/faceAnalysis.container.js +++ b/client/faceAnalysis/faceAnalysis.container.js @@ -12,7 +12,7 @@ class FaceAnalysisContainer extends Component { const { payload } = this.props // console.log(payload) return ( -
+
diff --git a/client/faceAnalysis/faceAnalysis.query.js b/client/faceAnalysis/faceAnalysis.query.js index a79e3e78..33dd641f 100644 --- a/client/faceAnalysis/faceAnalysis.query.js +++ b/client/faceAnalysis/faceAnalysis.query.js @@ -19,13 +19,23 @@ class FaceAnalysisQuery extends Component { } upload(blob) { + if (this.state.image) { + URL.revokeObjectURL(this.state.image) + } + const url = URL.createObjectURL(blob) + this.setState({ image: url }) this.props.actions.upload(this.props.payload, blob) } + componentWillUnmount() { + if (this.state.image) { + URL.revokeObjectURL(this.state.image) + } + } + render() { const { result } = this.props const { image } = this.state - console.log(result) const style = {} if (image) { style.backgroundImage = 'url(' + image + ')' diff --git a/client/faceAnalysis/faceAnalysis.reducer.js b/client/faceAnalysis/faceAnalysis.reducer.js index de6e5b0a..d9be7447 100644 --- a/client/faceAnalysis/faceAnalysis.reducer.js +++ b/client/faceAnalysis/faceAnalysis.reducer.js @@ -5,25 +5,32 @@ const initialState = () => ({ task: {}, result: {}, loading: false, + startTime: 0, + timing: 0, }) export default function faceAnalysisReducer(state = initialState(), action) { + const { startTime } = state switch (action.type) { case types.faceAnalysis.loading: return { ...state, + startTime: action.ts, + timing: 0, [action.tag]: { loading: true }, } case types.faceAnalysis.loaded: return { ...state, + timing: action.ts - startTime, [action.tag]: action.data, } case types.faceAnalysis.poll: return { ...state, + timing: action.ts - startTime, result: action.data, } @@ -31,6 +38,7 @@ export default function faceAnalysisReducer(state = initialState(), action) { console.log('error', action) return { ...state, + timing: action.ts - startTime, [action.tag]: { error: action.err }, } diff --git a/client/faceAnalysis/faceAnalysis.result.js b/client/faceAnalysis/faceAnalysis.result.js index f9531eba..63a23d65 100644 --- a/client/faceAnalysis/faceAnalysis.result.js +++ b/client/faceAnalysis/faceAnalysis.result.js @@ -36,8 +36,10 @@ const errors = { class FaceAnalysisResult extends Component { render() { - const { query, task, result, loading, error } = this.props.result - console.log(this.props.result) + const { result, timing } = this.props + const { data, error, loading, message } = result + let { step, total } = data || {} + // console.log(step, total) if (loading) { return (
@@ -48,7 +50,6 @@ class FaceAnalysisResult extends Component {
) } - console.log(task, result) if (error) { // console.log(error) let errorMessage = errors[error] || errors.error @@ -56,12 +57,24 @@ class FaceAnalysisResult extends Component {
{errorMessage}
) } - if (!task && !result) return - + // console.log(result) + if (!total) { + return ( +
+ ) + } + let blurImg = data.data.blur_fn && ( +
+ + Blurred image +
+ ) return (
+ {!(step && total && message) ? '' : ({step} / {total}: {message})} + {blurImg}
- Query took {query.timing.toFixed(2)} seconds + Query took {(timing / 1000).toFixed(2)} s.
) @@ -71,6 +84,7 @@ class FaceAnalysisResult extends Component { const mapStateToProps = state => ({ query: state.faceAnalysis.query, result: state.faceAnalysis.result, + timing: state.faceAnalysis.timing, options: state.faceAnalysis.options, }) diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py index 663f52cc..5ad454d8 100644 --- a/megapixels/app/server/api.py +++ b/megapixels/app/server/api.py @@ -39,7 +39,6 @@ def show(dataset_name): else: return jsonify({ 'status': 404 }) - @api.route('/dataset//face', methods=['POST']) def upload(dataset_name): """Query an image against FAISS and return the matching identities""" diff --git a/megapixels/app/server/api_task.py b/megapixels/app/server/api_task.py index fb24c154..c9bc19ed 100644 --- a/megapixels/app/server/api_task.py +++ b/megapixels/app/server/api_task.py @@ -31,22 +31,23 @@ def task_status(task_name, task_id): return jsonify({ 'state': 'error', 'percent': 100, - 'message': 'Unknown task' + 'message': 'Unknown task', }) - # app.logger.info('task state: {}'.format(task.state)) if task.state == 'PENDING': response = { 'state': task.state, 'percent': 0, - 'message': 'Pending...' + 'message': 'Pending...', + 'data': task.info, } elif task.state != 'FAILURE': response = { 'state': task.state, 'percent': task.info.get('percent', 0), 'uuid': task.info.get('uuid', 0), - 'message': task.info.get('message', '') + 'message': task.info.get('message', ''), + 'data': task.info, } if 'result' in task.info: response['result'] = task.info['result'] @@ -56,6 +57,7 @@ def task_status(task_name, task_id): 'state': task.state, 'percent': 100, 'message': str(task.info), # this is the exception raised + 'data': task.info, } return jsonify(response) diff --git a/megapixels/app/server/tasks/blur.py b/megapixels/app/server/tasks/blur.py index d1f67f54..42977097 100644 --- a/megapixels/app/server/tasks/blur.py +++ b/megapixels/app/server/tasks/blur.py @@ -9,6 +9,8 @@ import numpy as np from app.utils.im_utils import ensure_np, ensure_pil from flask import current_app as app +import app.settings.app_cfg as cfg + from app.server.tasks import celery from celery.utils.log import get_task_logger @@ -19,57 +21,58 @@ import imutils def blur_task(self, uuid_name, fn): """Process image and update during""" celery_logger.debug('process_image_task, uuid: {}'.format(uuid_name)) + celery_logger.debug('fn: {}'.format(fn)) files = [] + meta = { + 'step': 0, + 'total': 3, + 'message': 'Starting', + 'uuid': uuid_name, + 'data': {}, + } + self.update_state(state='PROCESSING', meta=meta) + im = Image.open(fn).convert('RGB') + os.remove(fn) - self.update_state( - state = 'PROCESSING', - meta = { - 'percent': 0.25, - 'message': 'Applying blur', - 'uuid': uuid_name - }) + meta['step'] += 1 + meta['message'] = 'Applying blur' + self.update_state(state='PROCESSING', meta=meta) im_np = ensure_np(im) im_blur = cv.blur(im_np, (5,5), 1.0) im_blur_pil = ensure_pil(im_blur) fn = uuid_name + '_blur.jpg' - # fpath = os.path.join(render_dir, fn) - # im_blur_pil.save(fpath, 'JPEG', quality=95) + fpath = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn) + im_blur_pil.save(fpath, 'JPEG', quality=80) + celery_logger.debug('fpath: {}'.format(fpath)) + print('fpath: {}'.format(fpath)) # files.append({ # 'title': 'Blurred image', # 'fn': render_uri + uuid_name + '_blur.jpg' # }) + meta['step'] += 1 + meta['message'] = 'Applying blur' + meta['data']['blur_fn'] = os.path.join('/user_content/', fn) + self.update_state(state='PROCESSING', meta=meta) time.sleep(3) - self.update_state( - 
state = 'PROCESSING', - meta = { - 'percent': 0.75, - 'message': 'Sleeping some more', - 'uuid': uuid_name - }) - time.sleep(2) + if os.path.exists(fpath): + os.remove(fpath) - data = { - 'uuid': uuid_name, - 'date': str(datetime.datetime.now()), - 'files': files - } + meta['step'] += 1 + meta['message'] = 'Securely deleting user content' + self.update_state(state='PROCESSING', meta=meta) + time.sleep(2) - # json_path = os.path.join(json_dir, uuid_name + '.json') - # with open(json_path, 'w') as json_file: - # json.dump(data, json_file) + celery_logger.debug('done!!') + + meta['step'] = meta['total'] + meta['state'] = 'complete' + return meta - celery_logger.debug('ok') - - return { - 'percent': 100, - 'state': 'complete', - 'uuid': uuid_name, - } diff --git a/megapixels/app/server/tasks/fullmonte.py b/megapixels/app/server/tasks/fullmonte.py index 17ca9403..8215656a 100644 --- a/megapixels/app/server/tasks/fullmonte.py +++ b/megapixels/app/server/tasks/fullmonte.py @@ -17,15 +17,15 @@ from app.processors import face_detector, face_landmarks from app.models.data_store import DataStore @celery.task(bind=True) -def fullmonte_task(self, uuid_name): - return - +def fullmonte_task(self, uuid_name, fn): # TOOD add selective testing opt_run_pose = True opt_run_2d_68 = True opt_run_3d_68 = True opt_run_3d_68 = True + return + # ------------------------------------------------- # init here diff --git a/megapixels/app/server/tasks/sleep.py b/megapixels/app/server/tasks/sleep.py index 9b91cc52..fa40b0e9 100644 --- a/megapixels/app/server/tasks/sleep.py +++ b/megapixels/app/server/tasks/sleep.py @@ -22,7 +22,7 @@ def sleep_task(self, uuid_name): for i,m in enumerate(msgs): percent = int(float(i)/float(len(msgs))*100.0) self.update_state( - state = 'PROCESSING', + state = 'processing', meta = { 'percent': percent, 'message': m['msg'], diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py index a8f41819..fea47572 100644 --- a/megapixels/app/settings/app_cfg.py +++ b/megapixels/app/settings/app_cfg.py @@ -148,6 +148,7 @@ S3_DATASETS_PATH = "v1" # datasets is already in the filename DIR_SITE_PUBLIC = "../site/public" DIR_SITE_CONTENT = "../site/content" DIR_SITE_TEMPLATES = "../site/templates" +DIR_SITE_USER_CONTENT = "../site/public/user_content" # ----------------------------------------------------------------------------- # Celery diff --git a/site/assets/css/applets.css b/site/assets/css/applets.css index b64da4b7..e5b73562 100644 --- a/site/assets/css/applets.css +++ b/site/assets/css/applets.css @@ -25,6 +25,9 @@ font-size: 9pt; padding-top: 10px; } + +/* search results */ + .results { margin-top: 10px; padding-bottom: 10px; @@ -119,4 +122,16 @@ } .tabulator-row.tabulator-row-even { background-color: rgba(255,255,255,0.1); -} \ No newline at end of file +} + +/* analysis results */ + +.analysisContainer .result div { + width: 256px; + text-align: center; + border: 1px solid white; + padding: 10px; +} +.analysisContainer .result div img { + max-width: 100%; +} -- cgit v1.2.3-70-g09d2 From 198147bef9976a41046c3c513dc4d33babf7a238 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Sun, 13 Jan 2019 22:20:06 +0100 Subject: extracting 3d facial vectors --- client/faceAnalysis/faceAnalysis.actions.js | 2 +- client/faceAnalysis/faceAnalysis.result.js | 26 ++- megapixels/app/server/api.py | 2 - megapixels/app/server/api_task.py | 13 +- megapixels/app/server/tasks/__init__.py | 15 +- megapixels/app/server/tasks/blur.py | 15 +- megapixels/app/server/tasks/demo.py | 244 
++++++++++++++++++++++++++++ megapixels/app/server/tasks/fullmonte.py | 199 ----------------------- site/assets/css/applets.css | 6 +- 9 files changed, 292 insertions(+), 230 deletions(-) create mode 100644 megapixels/app/server/tasks/demo.py delete mode 100644 megapixels/app/server/tasks/fullmonte.py (limited to 'site') diff --git a/client/faceAnalysis/faceAnalysis.actions.js b/client/faceAnalysis/faceAnalysis.actions.js index f8d8973f..2d372c1e 100644 --- a/client/faceAnalysis/faceAnalysis.actions.js +++ b/client/faceAnalysis/faceAnalysis.actions.js @@ -8,7 +8,7 @@ import { get, post } from '../util' // urls const url = { - upload: () => process.env.API_HOST + '/task/upload/blur', + upload: () => process.env.API_HOST + '/task/upload/demo', } export const publicUrl = { } diff --git a/client/faceAnalysis/faceAnalysis.result.js b/client/faceAnalysis/faceAnalysis.result.js index 63a23d65..fd079529 100644 --- a/client/faceAnalysis/faceAnalysis.result.js +++ b/client/faceAnalysis/faceAnalysis.result.js @@ -1,7 +1,6 @@ import React, { Component } from 'react' import { bindActionCreators } from 'redux' import { connect } from 'react-redux' -import { courtesyS } from '../util' import * as actions from './faceAnalysis.actions' import { Loader } from '../common' @@ -63,16 +62,25 @@ class FaceAnalysisResult extends Component {
) } - let blurImg = data.data.blur_fn && ( -
- - Blurred image -
- ) + const results = ['blur_fn', 'landmarks_3d_68'].map(tag => { + if (tag in data.data) { + const { title, url } = data.data[tag] + return ( +
+ + {title} +
+ ) + } + return null + }).filter(a => a) + return ( -
+
{!(step && total && message) ? '' : ({step} / {total}: {message})} - {blurImg} +
+ {results} +
Query took {(timing / 1000).toFixed(2)} s.
diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py index 5ad454d8..b3bce9bc 100644 --- a/megapixels/app/server/api.py +++ b/megapixels/app/server/api.py @@ -1,5 +1,3 @@ -import logging -import logging.handlers import os import re import time diff --git a/megapixels/app/server/api_task.py b/megapixels/app/server/api_task.py index c9bc19ed..57ae9f7d 100644 --- a/megapixels/app/server/api_task.py +++ b/megapixels/app/server/api_task.py @@ -27,7 +27,8 @@ def task_status(task_name, task_id): if task_name in task_lookup: task = task_lookup[task_name]['task'].AsyncResult(task_id) # task = AsyncResult(task_id, app=celery) - else: + + if task_name not in task_lookup or task.info is None: return jsonify({ 'state': 'error', 'percent': 100, @@ -75,10 +76,16 @@ def sleep_test(): @api_task.route('/upload/blur', methods=['POST']) def upload(): + return process('blur') + +@api_task.route('/upload/demo', methods=['POST']) +def demo(): + return process('demo') + +def process(style): """ - Process a images in a particular style + Process an image in a particular style """ - style = 'blur' print('style: {}'.format(style)) if style in task_lookup: task = task_lookup[style]['task'] diff --git a/megapixels/app/server/tasks/__init__.py b/megapixels/app/server/tasks/__init__.py index fd6e398a..c0db0be5 100644 --- a/megapixels/app/server/tasks/__init__.py +++ b/megapixels/app/server/tasks/__init__.py @@ -6,6 +6,7 @@ celery = Celery(__name__, backend=cfg.CELERY_RESULT_BACKEND, broker=cfg.CELERY_B from app.server.tasks.sleep import sleep_task from app.server.tasks.blur import blur_task +from app.server.tasks.demo import demo_task def list_active_tasks(): dropdown = {} @@ -35,12 +36,12 @@ task_lookup = { 'blur': { 'title': 'Blur', 'task': blur_task, - 'active': False, + 'active': True, }, - # 'fullmonte': { - # 'title': 'TIA facial processing pipeline', - # 'task': fullmonte, - # 'active': True, - # 'default': True, - # } + 'demo': { + 'title': 'Facial processing pipeline', + 'task': demo_task, + 'active': True, + 'default': True, + } } diff --git a/megapixels/app/server/tasks/blur.py b/megapixels/app/server/tasks/blur.py index 42977097..74798cee 100644 --- a/megapixels/app/server/tasks/blur.py +++ b/megapixels/app/server/tasks/blur.py @@ -14,14 +14,14 @@ import app.settings.app_cfg as cfg from app.server.tasks import celery from celery.utils.log import get_task_logger -celery_logger = get_task_logger(__name__) +log = get_task_logger(__name__) import imutils @celery.task(bind=True) def blur_task(self, uuid_name, fn): """Process image and update during""" - celery_logger.debug('process_image_task, uuid: {}'.format(uuid_name)) - celery_logger.debug('fn: {}'.format(fn)) + log.debug('process_image_task, uuid: {}'.format(uuid_name)) + log.debug('fn: {}'.format(fn)) files = [] @@ -48,7 +48,7 @@ def blur_task(self, uuid_name, fn): fn = uuid_name + '_blur.jpg' fpath = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn) im_blur_pil.save(fpath, 'JPEG', quality=80) - celery_logger.debug('fpath: {}'.format(fpath)) + log.debug('fpath: {}'.format(fpath)) print('fpath: {}'.format(fpath)) # files.append({ @@ -58,7 +58,10 @@ def blur_task(self, uuid_name, fn): meta['step'] += 1 meta['message'] = 'Applying blur' - meta['data']['blur_fn'] = os.path.join('/user_content/', fn) + meta['data']['blur_fn'] = { + 'title': 'Blurred image', + 'url': os.path.join('/user_content/', fn) + } self.update_state(state='PROCESSING', meta=meta) time.sleep(3) @@ -70,7 +73,7 @@ def blur_task(self, uuid_name, fn): 
self.update_state(state='PROCESSING', meta=meta) time.sleep(2) - celery_logger.debug('done!!') + log.debug('done!!') meta['step'] = meta['total'] meta['state'] = 'complete' diff --git a/megapixels/app/server/tasks/demo.py b/megapixels/app/server/tasks/demo.py new file mode 100644 index 00000000..acc5dbac --- /dev/null +++ b/megapixels/app/server/tasks/demo.py @@ -0,0 +1,244 @@ + +import app.settings.app_cfg as cfg +from app.server.tasks import celery + +from celery.utils.log import get_task_logger +log = get_task_logger(__name__) + +opt_size = (256, 256,) + +@celery.task(bind=True) +def demo_task(self, uuid_name, fn): + + import sys + import os + from os.path import join + from pathlib import Path + import time + + import numpy as np + import cv2 as cv + import dlib + from PIL import Image + import matplotlib.pyplot as plt + + from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils + from app.utils import plot_utils + from app.processors import face_detector, face_landmarks + from app.models.data_store import DataStore + + # TODO add selective testing + opt_gpu = -1 + opt_run_pose = True + opt_run_2d_68 = True + opt_run_3d_68 = True + opt_run_3d_68 = True + paths + + meta = { + 'step': 0, + 'total': 3, + 'message': 'Starting', + 'uuid': uuid_name, + 'data': {}, + } + paths = [] + + def step(msg, step=0): + meta['step'] += step + meta['message'] = msg + log.debug('> {}'.format(msg)) + self.update_state(state='PROCESSING', meta=meta) + + step('Loading image') + self.update_state(state='PROCESSING', meta=meta) + + # os.path.join('/user_content/', fn) + + # ------------------------------------------------- + # init here + + # load image + im = cv.imread(fn) + im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) + + # ---------------------------------------------------------------------------- + # detect face + + face_detector_instance = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU + step('Detecting face') + st = time.time() + bboxes = face_detector_instance.detect(im_resized, largest=True) + bbox = bboxes[0] + dim = im_resized.shape[:2][::-1] + bbox_dim = bbox.to_dim(dim) + if not bbox: + log.error('No face detected') + meta['error'] = 'No face detected' + self.update_state(state='FAILURE', meta=meta) + return meta + else: + log.info(f'Detected face in {(time.time() - st):.2f}s') + + + # ---------------------------------------------------------------------------- + # detect 3D landmarks + + step('Generating 3D Landmarks') + log.info('loading 3D landmark generator files...') + landmark_detector_3d_68 = face_landmarks.FaceAlignment3D_68(gpu=opt_gpu) # -1 for CPU + log.info('generating 3D landmarks...') + st = time.time() + points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_dim.to_xyxy()) + log.info(f'generated 3D landmarks in {(time.time() - st):.2f}s') + log.info('') + + # draw 3d landmarks + im_landmarks_3d_68 = im_resized.copy() + draw_utils.draw_landmarks3D(im_landmarks_3d_68, points_3d_68) + draw_utils.draw_bbox(im_landmarks_3d_68, bbox_dim) + + save_image('landmarks_3d_68', '3D Landmarks', im_landmarks_3d_68) + + def save_image(key, title, data): + fn = '{}_{}.jpg'.format(uuid_name, key) + fpath = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn) + paths.append(fpath) + cv.imwrite(fpath, im_landmarks_3d_68) + + meta['data']['landmarks_3d_68'] = { + 'title': '3D Landmarks', + 'url': os.path.join('/user_content/', fn), + } + step('Generated 3D Landmarks', step=0) + + # 
---------------------------------------------------------------------------- + # generate 3D GIF animation + + # step('Generating GIF Animation') + # log.info('generating 3D animation...') + # if not opt_fp_out: + # fpp_im = Path(opt_fp_in) + # fp_out = join(fpp_im.parent, f'{fpp_im.stem}_anim.gif') + # else: + # fp_out = opt_fp_out + # st = time.time() + # plot_utils.generate_3d_landmark_anim(np.array(points_3d_68), fp_out, + # size=opt_gif_size, num_frames=opt_gif_frames) + # log.info(f'Generated animation in {(time.time() - st):.2f}s') + # log.info(f'Saved to: {fp_out}') + # log.info('') + + + # # ---------------------------------------------------------------------------- + # # generate face vectors, only to test if feature extraction works + + # step('Generating face vectors') + # log.info('initialize face recognition model...') + # from app.processors import face_recognition + # face_rec = face_recognition.RecognitionDLIB() + # st = time.time() + # log.info('generating face vector...') + # vec = face_rec.vec(im_resized, bbox_dim) + # log.info(f'generated face vector in {(time.time() - st):.2f}s') + # log.info('') + + + # # ---------------------------------------------------------------------------- + # # generate 68 point landmarks using dlib + + # step('Generating 2D 68PT landmarks') + # log.info('initializing face landmarks 68 dlib...') + # from app.processors import face_landmarks + # landmark_detector_2d_68 = face_landmarks.Dlib2D_68() + # log.info('generating 2D 68PT landmarks...') + # st = time.time() + # points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim) + # log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s') + # log.info('') + + + # # ---------------------------------------------------------------------------- + # # generate pose from 68 point 2D landmarks + + # if opt_run_pose: + # step('Generating pose') + # log.info('initialize pose...') + # from app.processors import face_pose + # pose_detector = face_pose.FacePoseDLIB() + # log.info('generating pose...') + # st = time.time() + # pose_data = pose_detector.pose(points_2d_68, dim) + # log.info(f'generated pose {(time.time() - st):.2f}s') + # log.info('') + + + # # ---------------------------------------------------------------------------- + # # generate pose from 68 point 2D landmarks + + step('Done') + + # done + # self.log.debug('Add age real') + # self.log.debug('Add age apparent') + # self.log.debug('Add gender') + + + # # 3DDFA + # self.log.debug('Add depth') + # self.log.debug('Add pncc') + + # # TODO + # self.log.debug('Add 3D face model') + # self.log.debug('Add face texture flat') + # self.log.debug('Add ethnicity') + + # display + # draw bbox + + # # draw 2d landmarks + # im_landmarks_2d_68 = im_resized.copy() + # draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68) + # draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim) + + # # draw pose + # if opt_run_pose: + # im_pose = im_resized.copy() + # draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points']) + # draw_utils.draw_degrees(im_pose, pose_data) + + # # draw animated GIF + # im = Image.open(fp_out) + # im_frames = [] + # duration = im.info['duration'] + # try: + # while True: + # im.seek(len(im_frames)) + # mypalette = im.getpalette() + # im.putpalette(mypalette) + # im_jpg = Image.new("RGB", im.size) + # im_jpg.paste(im) + # im_np = im_utils.pil2np(im_jpg.copy()) + # im_frames.append(im_np) + # except EOFError: + # pass # end of GIF sequence + + # n_frames = len(im_frames) + # frame_number = 
0 + + # # show all images here + # cv.imshow('Original', im_resized) + # cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68) + # cv.imshow('3D 68PT Landmarks', im_landmarks_3d_68) + # cv.imshow('Pose', im_pose) + # cv.imshow('3D 68pt GIF', im_frames[frame_number]) + + log.debug('done!!') + + for path in paths: + if os.path.exists(path): + os.remove(path) + + meta['step'] = meta['total'] + meta['state'] = 'SUCCESS' + return meta diff --git a/megapixels/app/server/tasks/fullmonte.py b/megapixels/app/server/tasks/fullmonte.py deleted file mode 100644 index 8215656a..00000000 --- a/megapixels/app/server/tasks/fullmonte.py +++ /dev/null @@ -1,199 +0,0 @@ - -import sys -import os -from os.path import join -from pathlib import Path -import time - -import numpy as np -import cv2 as cv -import dlib -from PIL import Image -import matplotlib.pyplot as plt - -from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils -from app.utils import plot_utils -from app.processors import face_detector, face_landmarks -from app.models.data_store import DataStore - -@celery.task(bind=True) -def fullmonte_task(self, uuid_name, fn): - # TOOD add selective testing - opt_run_pose = True - opt_run_2d_68 = True - opt_run_3d_68 = True - opt_run_3d_68 = True - - return - - # ------------------------------------------------- - # init here - - - log = logger_utils.Logger.getLogger() - - # load image - im = cv.imread(opt_fp_in) - im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) - - - # ---------------------------------------------------------------------------- - # detect face - - face_detector = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU - log.info('detecting face...') - st = time.time() - bboxes = face_detector.detect(im_resized, largest=True) - bbox = bboxes[0] - dim = im_resized.shape[:2][::-1] - bbox_dim = bbox.to_dim(dim) - if not bbox: - log.error('no face detected') - return - else: - log.info(f'Detected face in {(time.time() - st):.2f}s') - log.info('') - - - # ---------------------------------------------------------------------------- - # detect 3D landmarks - - log.info('loading 3D landmark generator files...') - landmark_detector_3d_68 = face_landmarks.FaceAlignment3D_68(gpu=opt_gpu) # -1 for CPU - log.info('generating 3D landmarks...') - st = time.time() - points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_dim.to_xyxy()) - log.info(f'generated 3D landmarks in {(time.time() - st):.2f}s') - log.info('') - - - # ---------------------------------------------------------------------------- - # generate 3D GIF animation - - log.info('generating 3D animation...') - if not opt_fp_out: - fpp_im = Path(opt_fp_in) - fp_out = join(fpp_im.parent, f'{fpp_im.stem}_anim.gif') - else: - fp_out = opt_fp_out - st = time.time() - plot_utils.generate_3d_landmark_anim(np.array(points_3d_68), fp_out, - size=opt_gif_size, num_frames=opt_gif_frames) - log.info(f'Generated animation in {(time.time() - st):.2f}s') - log.info(f'Saved to: {fp_out}') - log.info('') - - - # ---------------------------------------------------------------------------- - # generate face vectors, only to test if feature extraction works - - log.info('initialize face recognition model...') - from app.processors import face_recognition - face_rec = face_recognition.RecognitionDLIB() - st = time.time() - log.info('generating face vector...') - vec = face_rec.vec(im_resized, bbox_dim) - log.info(f'generated face vector in {(time.time() - st):.2f}s') - log.info('') - - - # 
---------------------------------------------------------------------------- - # generate 68 point landmarks using dlib - - log.info('initializing face landmarks 68 dlib...') - from app.processors import face_landmarks - landmark_detector_2d_68 = face_landmarks.Dlib2D_68() - log.info('generating 2D 68PT landmarks...') - st = time.time() - points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim) - log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s') - log.info('') - - - # ---------------------------------------------------------------------------- - # generate pose from 68 point 2D landmarks - - if opt_run_pose: - log.info('initialize pose...') - from app.processors import face_pose - pose_detector = face_pose.FacePoseDLIB() - log.info('generating pose...') - st = time.time() - pose_data = pose_detector.pose(points_2d_68, dim) - log.info(f'generated pose {(time.time() - st):.2f}s') - log.info('') - - - # ---------------------------------------------------------------------------- - # generate pose from 68 point 2D landmarks - - # done - self.log.debug('Add age real') - self.log.debug('Add age apparent') - self.log.debug('Add gender') - - - # 3DDFA - self.log.debug('Add depth') - self.log.debug('Add pncc') - - # TODO - self.log.debug('Add 3D face model') - self.log.debug('Add face texture flat') - self.log.debug('Add ethnicity') - - # display - if opt_display: - - # draw bbox - - # draw 3d landmarks - im_landmarks_3d_68 = im_resized.copy() - draw_utils.draw_landmarks3D(im_landmarks_3d_68, points_3d_68) - draw_utils.draw_bbox(im_landmarks_3d_68, bbox_dim) - - # draw 2d landmarks - im_landmarks_2d_68 = im_resized.copy() - draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68) - draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim) - - # draw pose - if opt_run_pose: - im_pose = im_resized.copy() - draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points']) - draw_utils.draw_degrees(im_pose, pose_data) - - # draw animated GIF - im = Image.open(fp_out) - im_frames = [] - duration = im.info['duration'] - try: - while True: - im.seek(len(im_frames)) - mypalette = im.getpalette() - im.putpalette(mypalette) - im_jpg = Image.new("RGB", im.size) - im_jpg.paste(im) - im_np = im_utils.pil2np(im_jpg.copy()) - im_frames.append(im_np) - except EOFError: - pass # end of GIF sequence - - n_frames = len(im_frames) - frame_number = 0 - - while True: - # show all images here - cv.imshow('Original', im_resized) - cv.imshow('2D 68PT Landmarks', im_landmarks_2d_68) - cv.imshow('3D 68PT Landmarks', im_landmarks_3d_68) - cv.imshow('Pose', im_pose) - cv.imshow('3D 68pt GIF', im_frames[frame_number]) - frame_number = (frame_number + 1) % n_frames - k = cv.waitKey(duration) & 0xFF - if k == 27 or k == ord('q'): # ESC - cv.destroyAllWindows() - sys.exit() - elif k != 255: - # any key to continue - break \ No newline at end of file diff --git a/site/assets/css/applets.css b/site/assets/css/applets.css index e5b73562..0c566a9f 100644 --- a/site/assets/css/applets.css +++ b/site/assets/css/applets.css @@ -126,12 +126,12 @@ /* analysis results */ -.analysisContainer .result div { +.analysisContainer .results div { width: 256px; text-align: center; - border: 1px solid white; padding: 10px; + margin: 10px; } -.analysisContainer .result div img { +.analysisContainer .results div img { max-width: 100%; } -- cgit v1.2.3-70-g09d2 From a712efd526481ad3743c860c1fb142889bb33cf4 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Sun, 13 Jan 2019 23:51:23 +0100 Subject: html --- 
site/public/test/index.html | 16 ++++++++-------- site/public/test/style/index.html | 9 +++------ 2 files changed, 11 insertions(+), 14 deletions(-) (limited to 'site') diff --git a/site/public/test/index.html b/site/public/test/index.html index 41f8eda5..b4d16036 100644 --- a/site/public/test/index.html +++ b/site/public/test/index.html @@ -30,14 +30,14 @@

Megapixels UI Tests

diff --git a/site/public/test/style/index.html b/site/public/test/style/index.html index f25f1daf..2a41b8b1 100644 --- a/site/public/test/style/index.html +++ b/site/public/test/style/index.html @@ -30,7 +30,7 @@

Style Examples

← Back to test index

-
Style Guide Test
Style Guide Test
Date
17-Jan-2019
Numbers
17
Identities
12,139
But also
This is a test of the stylesheet

Header 1

+
Style Guide Test
Style Guide Test
Date
17-Jan-2019
Numbers
17
Identities
12,139
But also
This is a test of the stylesheet

Header 1

Header 2

Header 3

Header 4

@@ -53,17 +53,14 @@
Person 2
Person 2
Person 3. Let me tell you about Person 3.  This person has a very long description with text which wraps like crazy
Person 3. Let me tell you about Person 3. This person has a very long description with text which wraps like crazy

est, qui dolorem ipsum, quia dolor sit amet consectetur adipisci[ng] velit, sed quia non-numquam [do] eius modi tempora inci[di]dunt, ut labore et dolore magnam aliquam quaerat voluptatem.

-
This image is extremely wide and the text beneath it will wrap but thats fine because it can also contain <a href="https://example.com/">hyperlinks</a>! Yes, you read that right—hyperlinks! Lorem ipsum dolor sit amet ad volotesque sic hoc ad nauseam
This image is extremely wide and the text beneath it will wrap but that's fine because it can also contain hyperlinks! Yes, you read that right—hyperlinks! Lorem ipsum dolor sit amet ad volotesque sic hoc ad nauseam

Inline code has back-ticks around it.

+
This image is extremely wide and the text beneath it will wrap but thats fine because it can also contain <a href="https://example.com/">hyperlinks</a>! Yes, you read that right—hyperlinks! Lorem ipsum dolor sit amet ad volotesque sic hoc ad nauseam
This image is extremely wide and the text beneath it will wrap but that's fine because it can also contain hyperlinks! Yes, you read that right—hyperlinks! Lorem ipsum dolor sit amet ad volotesque sic hoc ad nauseam

Inline code has back-ticks around it.

var s = "JavaScript syntax highlighting";
 alert(s);
 
s = "Python syntax highlighting"
 print(s)
 
-
Generic code block. Note that code blocks that are not so marked will not appear.
-But let's throw in a <b>tag</b>.
-
-

Horizontal rule

+
tag."]}'>

Horizontal rule


Citations below here

-- cgit v1.2.3-70-g09d2