Diffstat (limited to 'megapixels/app')
-rw-r--r--  megapixels/app/models/sql_factory.py     | 180
-rw-r--r--  megapixels/app/processors/faiss.py       |   5
-rw-r--r--  megapixels/app/server/api.py             |  57
-rw-r--r--  megapixels/app/server/api_task.py        | 124
-rw-r--r--  megapixels/app/server/create.py          |  20
-rw-r--r--  megapixels/app/server/tasks/__init__.py  |  47
-rw-r--r--  megapixels/app/server/tasks/blur.py      |  81
-rw-r--r--  megapixels/app/server/tasks/demo.py      | 262
-rw-r--r--  megapixels/app/server/tasks/sleep.py     |  38
-rw-r--r--  megapixels/app/settings/app_cfg.py       |   7
-rw-r--r--  megapixels/app/utils/im_utils.py         |  12
11 files changed, 756 insertions(+), 77 deletions(-)
diff --git a/megapixels/app/models/sql_factory.py b/megapixels/app/models/sql_factory.py
index a580f28e..5b3cb5a3 100644
--- a/megapixels/app/models/sql_factory.py
+++ b/megapixels/app/models/sql_factory.py
@@ -3,7 +3,7 @@ import glob
import time
import pandas as pd
-from sqlalchemy import create_engine, Table, Column, String, Integer, DateTime, Float
+from sqlalchemy import create_engine, Table, Column, String, Integer, DateTime, Float, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
@@ -62,7 +62,8 @@ def load_sql_dataset(path, replace=False, engine=None, base_model=None):
df = pd.read_csv(fn)
# fix columns that are named "index", a sql reserved word
- df.columns = sorted(table.__table__.columns).keys()
+ columns = [column.name for column in table.__table__.columns]
+ df.columns = columns
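+      # e.g. for the file_record table, columns is
+      # ['id', 'ext', 'fn', 'identity_key', 'sha256', 'subdir', 'uuid', 'identity_id'];
+      # NB: this assumes the CSV column order matches the model's definition order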
df.to_sql(name=table.__tablename__, con=engine, if_exists='replace', index=False)
return dataset
@@ -82,87 +83,153 @@ class SqlDataset:
self.base_model = base_model
def describe(self):
+ """
+ List the available SQL tables for a given dataset.
+ """
return {
'name': self.name,
'tables': list(self.tables.keys()),
}
def get_identity(self, id):
- table = self.get_table('identity_meta')
+    """
+    Get an identity given a file record ID.
+    """
# id += 1
- identity = table.query.filter(table.image_id <= id).order_by(table.image_id.desc()).first().toJSON()
+ file_record_table = self.get_table('file_record')
+ file_record = file_record_table.query.filter(file_record_table.id == id).first()
+
+ if not file_record:
+ return None
+
+ identity_table = self.get_table('identity')
+ identity = identity_table.query.filter(identity_table.id == file_record.identity_id).first()
+
+ if not identity:
+ return None
+
return {
- 'uuid': self.select('uuids', id),
- 'identity': identity,
- 'roi': self.select('roi', id),
- 'pose': self.select('pose', id),
+ 'file_record': file_record.toJSON(),
+ 'identity': identity.toJSON(),
+ 'face_roi': self.select('face_roi', id),
+ 'face_pose': self.select('face_pose', id),
}
def search_name(self, q):
- table = self.get_table('identity_meta')
- uuid_table = self.get_table('uuids')
+ """
+ Find an identity by name.
+ """
+ table = self.get_table('identity')
+ identity_list = table.query.filter(table.fullname.ilike(q)).order_by(table.fullname.desc()).limit(15)
+ return identity_list
- identity = table.query.filter(table.fullname.like(q)).order_by(table.fullname.desc()).limit(30)
+ def search_description(self, q):
+ """
+ Find an identity by description.
+ """
+ table = self.get_table('identity')
+ identity_list = table.query.filter(table.description.ilike(q)).order_by(table.description.desc()).limit(15)
+ return identity_list
+
+ def get_file_records_for_identities(self, identity_list):
+ """
+ Given a list of identities, map these to file records.
+ """
identities = []
- for row in identity:
- uuid = uuid_table.query.filter(uuid_table.id == row.image_id).first()
- identities.append({
- 'uuid': uuid.toJSON(),
- 'identity': row.toJSON(),
- })
+ file_record_table = self.get_table('file_record')
+ for row in identity_list:
+ file_record = file_record_table.query.filter(file_record_table.identity_id == row.id).first()
+ if file_record:
+ identities.append({
+ 'file_record': file_record.toJSON(),
+ 'identity': row.toJSON(),
+ })
return identities
def select(self, table, id):
+ """
+ Perform a generic select.
+ """
table = self.get_table(table)
if not table:
return None
session = Session()
# for obj in session.query(table).filter_by(id=id):
- # print(table)
+ # print(table)
obj = session.query(table).filter(table.id == id).first()
session.close()
return obj.toJSON() if obj else None
def get_table(self, type):
+    """
+    Get a memoized, dynamically generated table model.
+    """
if type in self.tables:
return self.tables[type]
- elif type == 'uuids':
- self.tables[type] = self.uuid_table()
- elif type == 'roi':
- self.tables[type] = self.roi_table()
- elif type == 'identity_meta':
+ elif type == 'file_record':
+ self.tables[type] = self.file_record_table()
+ elif type == 'identity':
self.tables[type] = self.identity_table()
- elif type == 'pose':
- self.tables[type] = self.pose_table()
+ elif type == 'face_roi':
+ self.tables[type] = self.face_roi_table()
+ elif type == 'face_pose':
+ self.tables[type] = self.face_pose_table()
else:
return None
return self.tables[type]
- # ==> uuids.csv <==
- # index,uuid
- # 0,f03fd921-2d56-4e83-8115-f658d6a72287
- def uuid_table(self):
- class UUID(self.base_model):
- __tablename__ = self.name + "_uuid"
+ # ==> file_record.csv <==
+ # index,ext,fn,identity_key,sha256,subdir,uuid,identity_index
+ def file_record_table(self):
+ class FileRecord(self.base_model):
+ __tablename__ = self.name + "_file_record"
id = Column(Integer, primary_key=True)
+ ext = Column(String(3, convert_unicode=True), nullable=False)
+ fn = Column(String(36, convert_unicode=True), nullable=False)
+ identity_key = Column(String(36, convert_unicode=True), nullable=False)
+      sha256 = Column(String(64, convert_unicode=True), nullable=False)  # a sha256 hex digest is 64 chars
+ subdir = Column(String(36, convert_unicode=True), nullable=False)
uuid = Column(String(36, convert_unicode=True), nullable=False)
+ identity_id = Column(Integer)
def toJSON(self):
return {
'id': self.id,
'uuid': self.uuid,
+ 'identity_id': self.identity_id,
}
- return UUID
+ return FileRecord
- # ==> roi.csv <==
+ # ==> identity.csv <==
+ # index,description,gender,images,fullname
+  # 0,Canadian actress,f,1,A. J. Cook
+ def identity_table(self):
+ class Identity(self.base_model):
+ __tablename__ = self.name + "_identity"
+ id = Column(Integer, primary_key=True)
+ description = Column(String(36, convert_unicode=True), nullable=False)
+ gender = Column(String(1, convert_unicode=True), nullable=False)
+ images = Column(Integer, nullable=False)
+ fullname = Column(String(36, convert_unicode=True), nullable=False)
+ def toJSON(self):
+ return {
+ 'id': self.id,
+ 'fullname': self.fullname,
+ 'images': self.images,
+ 'gender': self.gender,
+ 'description': self.description,
+ }
+ return Identity
+
+ # ==> face_roi.csv <==
# index,h,image_height,image_index,image_width,w,x,y
# 0,0.33000000000000007,250,0,250,0.32999999999999996,0.33666666666666667,0.35
- def roi_table(self):
- class ROI(self.base_model):
+ def face_roi_table(self):
+ class FaceROI(self.base_model):
__tablename__ = self.name + "_roi"
id = Column(Integer, primary_key=True)
h = Column(Float, nullable=False)
image_height = Column(Integer, nullable=False)
- image_index = Column(Integer, nullable=False)
+ record_id = Column(Integer, nullable=False)
image_width = Column(Integer, nullable=False)
w = Column(Float, nullable=False)
x = Column(Float, nullable=False)
@@ -170,7 +237,7 @@ class SqlDataset:
def toJSON(self):
return {
'id': self.id,
- 'image_index': self.image_index,
+ 'record_id': self.record_id,
'image_height': self.image_height,
'image_width': self.image_width,
'w': self.w,
@@ -178,48 +245,25 @@ class SqlDataset:
'x': self.x,
'y': self.y,
}
- return ROI
-
- # ==> identity.csv <==
- # index,fullname,description,gender,images,image_index
- # 0,A. J. Cook,Canadian actress,f,1,0
- def identity_table(self):
- class Identity(self.base_model):
- __tablename__ = self.name + "_identity"
- id = Column(Integer, primary_key=True)
- fullname = Column(String(36, convert_unicode=True), nullable=False)
- description = Column(String(36, convert_unicode=True), nullable=False)
- gender = Column(String(1, convert_unicode=True), nullable=False)
- images = Column(Integer, nullable=False)
- image_id = Column(Integer, nullable=False)
- def toJSON(self):
- return {
- 'id': self.id,
- 'image_id': self.image_id,
- 'fullname': self.fullname,
- 'images': self.images,
- 'gender': self.gender,
- 'description': self.description,
- }
- return Identity
+ return FaceROI
- # ==> pose.csv <==
- # index,image_index,pitch,roll,yaw
+ # ==> face_pose.csv <==
+ # index,record_index,pitch,roll,yaw
# 0,0,11.16264458441435,10.415885631337728,22.99719032415318
- def pose_table(self):
- class Pose(self.base_model):
+ def face_pose_table(self):
+ class FacePose(self.base_model):
__tablename__ = self.name + "_pose"
id = Column(Integer, primary_key=True)
- image_id = Column(Integer, primary_key=True)
+ record_id = Column(Integer, nullable=False)
pitch = Column(Float, nullable=False)
roll = Column(Float, nullable=False)
yaw = Column(Float, nullable=False)
def toJSON(self):
return {
'id': self.id,
- 'image_id': self.image_id,
+ 'record_id': self.record_id,
'pitch': self.pitch,
'roll': self.roll,
'yaw': self.yaw,
}
- return Pose
+ return FacePose
diff --git a/megapixels/app/processors/faiss.py b/megapixels/app/processors/faiss.py
index 5156ad71..0de8ec69 100644
--- a/megapixels/app/processors/faiss.py
+++ b/megapixels/app/processors/faiss.py
@@ -27,9 +27,12 @@ def build_all_faiss_databases():
build_faiss_database(name, DefaultRecipe())
def build_faiss_database(name, recipe):
- vec_fn = os.path.join(cfg.DIR_FAISS_METADATA, name, "vecs.csv")
+ vec_fn = os.path.join(cfg.DIR_FAISS_METADATA, name, "face_vector.csv")
index_fn = os.path.join(cfg.DIR_FAISS_INDEXES, name + ".index")
+ if not os.path.exists(vec_fn):
+ return
+
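+  # recipe.factory_type is a faiss.index_factory() string; e.g. "Flat" builds an
+  # exact index, "IVF256,Flat" an inverted-file index (illustrative values only)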
index = faiss.index_factory(recipe.dim, recipe.factory_type)
keys, rows = load_csv_safe(vec_fn)
diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py
index 3683d5fd..b3bce9bc 100644
--- a/megapixels/app/server/api.py
+++ b/megapixels/app/server/api.py
@@ -3,6 +3,7 @@ import re
import time
import dlib
import numpy as np
+import operator
from flask import Blueprint, request, jsonify
from PIL import Image # todo: try to remove PIL dependency
@@ -27,7 +28,6 @@ def index():
"""List the datasets and their fields"""
return jsonify({ 'datasets': list_datasets() })
-
@api.route('/dataset/<dataset_name>')
def show(dataset_name):
"""Show the data that a dataset will return"""
@@ -37,7 +37,6 @@ def show(dataset_name):
else:
return jsonify({ 'status': 404 })
-
@api.route('/dataset/<dataset_name>/face', methods=['POST'])
def upload(dataset_name):
"""Query an image against FAISS and return the matching identities"""
@@ -108,9 +107,10 @@ def upload(dataset_name):
for _d, _i in zip(distances, indexes):
if _d <= THRESHOLD:
dists.append(round(float(_d), 2))
- ids.append(_i+1)
+ ids.append(_i)
- results = [ dataset.get_identity(int(_i)) for _i in ids ]
+ identities = [ dataset.get_identity(int(_i)) for _i in ids ]
+ identities = list(filter(None, identities))
# print(distances)
# print(ids)
@@ -127,7 +127,7 @@ def upload(dataset_name):
# print(results)
return jsonify({
'query': query,
- 'results': results,
+ 'results': identities,
'distances': dists,
})
@@ -139,15 +139,56 @@ def name_lookup(dataset_name):
dataset = get_dataset(dataset_name)
q = request.args.get('q')
- # print(q)
+  q = re.sub('[^a-zA-Z. ]+', '*', q or '')
+ terms = q.split(' ')
query = {
'q': q,
'timing': time.time() - start,
}
- results = dataset.search_name(q + '%') if q else None
+
+  if not q:
+    return jsonify({ 'query': query, 'results': [] })
+
+ lookup = {}
+ results_lookup = {}
+
+  names = dataset.search_name(q + '%')
+  for name in names:
+    lookup[name.id] = lookup.get(name.id, 0) + 4
+    results_lookup[name.id] = name
- # print(results)
+  for term in terms[0:5]:
+    if not term:
+      continue
+    search_term = '%' + term + '%'
+    for name in dataset.search_name(search_term):
+      lookup[name.id] = lookup.get(name.id, 0) + 2
+      results_lookup[name.id] = name
+    for name in dataset.search_description(search_term):
+      lookup[name.id] = lookup.get(name.id, 0) + 1
+      results_lookup[name.id] = name
+
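+  # Rank identities: a prefix match on the full query scores 4, a per-term
+  # name hit scores 2, and a per-term description hit scores 1; keep the top ten.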
+ sorted_names = sorted(lookup.items(), key=operator.itemgetter(1), reverse=True)[0:10]
+ top_names = [results_lookup[item[0]] for item in sorted_names]
+ results = dataset.get_file_records_for_identities(top_names)
+
return jsonify({
'query': query,
'results': results,
diff --git a/megapixels/app/server/api_task.py b/megapixels/app/server/api_task.py
new file mode 100644
index 00000000..57ae9f7d
--- /dev/null
+++ b/megapixels/app/server/api_task.py
@@ -0,0 +1,124 @@
+import os
+import re
+import uuid
+import time
+import dlib
+import tempfile
+import simplejson as json
+import numpy as np
+from flask import Blueprint, request, jsonify
+from PIL import Image, ImageOps # todo: try to remove PIL dependency
+
+from celery.result import AsyncResult
+from app.server.tasks import celery
+from app.server.tasks import task_lookup, list_active_tasks
+# from app.models.sql_factory import load_sql_datasets, list_datasets, get_dataset, get_table
+
+api_task = Blueprint('task', __name__)
+
+@api_task.route('/')
+def index():
+ """List active tasks"""
+    return jsonify(list_active_tasks())
+
+@api_task.route('/status/<task_name>/<task_id>')
+def task_status(task_name, task_id):
+ """Return celery image processing status"""
+    if task_name not in task_lookup:
+        return jsonify({
+            'state': 'error',
+            'percent': 100,
+            'message': 'Unknown task',
+        })
+
+    task = task_lookup[task_name]['task'].AsyncResult(task_id)
+    # task = AsyncResult(task_id, app=celery)
+
+    if task.info is None:
+        return jsonify({
+            'state': 'error',
+            'percent': 100,
+            'message': 'Unknown task',
+        })
+ # app.logger.info('task state: {}'.format(task.state))
+ if task.state == 'PENDING':
+ response = {
+ 'state': task.state,
+ 'percent': 0,
+ 'message': 'Pending...',
+ 'data': task.info,
+ }
+ elif task.state != 'FAILURE':
+ response = {
+ 'state': task.state,
+ 'percent': task.info.get('percent', 0),
+ 'uuid': task.info.get('uuid', 0),
+ 'message': task.info.get('message', ''),
+ 'data': task.info,
+ }
+ if 'result' in task.info:
+ response['result'] = task.info['result']
+ else:
+ # something went wrong in the background job
+ response = {
+ 'state': task.state,
+ 'percent': 100,
+ 'message': str(task.info), # this is the exception raised
+ 'data': task.info,
+ }
+ return jsonify(response)
+
+@api_task.route('/upload/sleep', methods=['GET', 'POST'])
+def sleep_test():
+ """
+ Test the Celery system using a task that sleeps.
+ """
+ async_task = task_lookup['sleep']['task'].apply_async(args=['sleep_test'])
+ task_url = '/task/status/{}/{}'.format('sleep', async_task.id)
+ return jsonify({
+ 'result': True,
+ 'task_url': task_url,
+ })
+
+@api_task.route('/upload/blur', methods=['POST'])
+def upload():
+ return process('blur')
+
+@api_task.route('/upload/demo', methods=['POST'])
+def demo():
+ return process('demo')
+
+def process(style):
+ """
+ Process an image in a particular style
+ """
+ print('style: {}'.format(style))
+ if style in task_lookup:
+ task = task_lookup[style]['task']
+ print('task', task)
+ else:
+ return jsonify({
+ 'result': False,
+ 'error': 'Unknown task',
+ })
+
+ print('get file...')
+ file = request.files['query_img']
+
+ uuid_str = str(uuid.uuid4())
+
+ print('[+] style: {}'.format(style))
+ print('[+] uuid_name: {}'.format(uuid_str))
+
+ im = Image.open(file.stream).convert('RGB')
+ im = ImageOps.fit(im, (256, 256,))
+
+ tmpfile = tempfile.NamedTemporaryFile(delete=False)
+
+ # Save image to disk
+ print('[+] Save image to temporary file')
+ im.save(tmpfile, 'JPEG', quality=80)
+
+ print('[+] Start celery')
+ async_task = task.apply_async(args=[uuid_str, tmpfile.name])
+ task_url = '/task/status/{}/{}'.format(style, async_task.id)
+
+ return jsonify({
+ 'result': True,
+ 'taskURL': task_url,
+ 'uuid': uuid_str
+ })
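+
+# Client flow (sketch): POST an image as multipart field 'query_img' to
+# /task/upload/<style>, read the returned task URL from the JSON response,
+# then poll /task/status/<style>/<task_id> until a terminal state is reported.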
diff --git a/megapixels/app/server/create.py b/megapixels/app/server/create.py
index 4b1333b9..a1ce56df 100644
--- a/megapixels/app/server/create.py
+++ b/megapixels/app/server/create.py
@@ -1,8 +1,25 @@
+import logging
+import logging.handlers
+
+logger = logging.getLogger("")
+logger.setLevel(logging.DEBUG)
+handler = logging.handlers.RotatingFileHandler("flask.log",
+ maxBytes=3000000, backupCount=2)
+formatter = logging.Formatter(
+ '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s')
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+logging.getLogger().addHandler(logging.StreamHandler())
+
+logging.debug("starting app")
+
from flask import Flask, Blueprint, jsonify, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from app.models.sql_factory import connection_url, load_sql_datasets
+from app.settings import app_cfg as cfg
from app.server.api import api
+from app.server.api_task import api_task
db = SQLAlchemy()
@@ -13,11 +30,14 @@ def create_app(script_info=None):
app = Flask(__name__, static_folder='static', static_url_path='')
app.config['SQLALCHEMY_DATABASE_URI'] = connection_url
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
+ app.config['CELERY_BROKER_URL'] = cfg.CELERY_BROKER_URL
+ app.config['CELERY_RESULT_BACKEND'] = cfg.CELERY_RESULT_BACKEND
db.init_app(app)
datasets = load_sql_datasets(replace=False, base_model=db.Model)
app.register_blueprint(api, url_prefix='/api')
+ app.register_blueprint(api_task, url_prefix='/task')
app.add_url_rule('/<path:file_relative_path_to_root>', 'serve_page', serve_page, methods=['GET'])
@app.route('/', methods=['GET'])
diff --git a/megapixels/app/server/tasks/__init__.py b/megapixels/app/server/tasks/__init__.py
new file mode 100644
index 00000000..c0db0be5
--- /dev/null
+++ b/megapixels/app/server/tasks/__init__.py
@@ -0,0 +1,47 @@
+import simplejson as json
+from app.settings import app_cfg as cfg
+from celery import Celery
+
+celery = Celery(__name__, backend=cfg.CELERY_RESULT_BACKEND, broker=cfg.CELERY_BROKER_URL)
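+
+# A worker must be running for tasks to execute; typical invocation
+# (a sketch; the exact CLI depends on the Celery version and deployment):
+#   celery -A app.server.tasks worker --loglevel=info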
+
+from app.server.tasks.sleep import sleep_task
+from app.server.tasks.blur import blur_task
+from app.server.tasks.demo import demo_task
+
+def list_active_tasks():
+ dropdown = {}
+ for k,v in task_lookup.items():
+        if v.get('active', True) is not False:
+            is_default = v.get('default') is True
+ task = {
+ 'name': k,
+ 'title': v['title'],
+ 'selected': is_default,
+ }
+ dropdown[k] = task
+ return dropdown
+
+###################################################################
+# Add all valid tasks to this lookup.
+# Set 'active': False to disable a task
+# Set 'default': True to define the default task
+
+task_lookup = {
+ 'sleep': {
+ 'title': 'Sleep Test',
+ 'task': sleep_task,
+ 'active': True,
+ },
+ 'blur': {
+ 'title': 'Blur',
+ 'task': blur_task,
+ 'active': True,
+ },
+ 'demo': {
+ 'title': 'Facial processing pipeline',
+ 'task': demo_task,
+ 'active': True,
+ 'default': True,
+ }
+}
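+
+# Usage (as in app/server/api_task.py): dispatch a task with
+#   task_lookup['blur']['task'].apply_async(args=[uuid_str, tmpfile.name])
+# and poll its status with
+#   task_lookup['blur']['task'].AsyncResult(task_id)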
diff --git a/megapixels/app/server/tasks/blur.py b/megapixels/app/server/tasks/blur.py
new file mode 100644
index 00000000..74798cee
--- /dev/null
+++ b/megapixels/app/server/tasks/blur.py
@@ -0,0 +1,81 @@
+import os
+import sys
+import time
+import datetime
+import json
+from PIL import Image
+import cv2 as cv
+import numpy as np
+from app.utils.im_utils import ensure_np, ensure_pil
+from flask import current_app as app
+
+import app.settings.app_cfg as cfg
+
+from app.server.tasks import celery
+
+from celery.utils.log import get_task_logger
+log = get_task_logger(__name__)
+
+@celery.task(bind=True)
+def blur_task(self, uuid_name, fn):
+ """Process image and update during"""
+ log.debug('process_image_task, uuid: {}'.format(uuid_name))
+ log.debug('fn: {}'.format(fn))
+
+ files = []
+
+ meta = {
+ 'step': 0,
+ 'total': 3,
+ 'message': 'Starting',
+ 'uuid': uuid_name,
+ 'data': {},
+ }
+ self.update_state(state='PROCESSING', meta=meta)
+
+ im = Image.open(fn).convert('RGB')
+ os.remove(fn)
+
+ meta['step'] += 1
+ meta['message'] = 'Applying blur'
+ self.update_state(state='PROCESSING', meta=meta)
+
+ im_np = ensure_np(im)
+ im_blur = cv.blur(im_np, (5,5), 1.0)
+ im_blur_pil = ensure_pil(im_blur)
+
+ fn = uuid_name + '_blur.jpg'
+ fpath = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn)
+ im_blur_pil.save(fpath, 'JPEG', quality=80)
+ log.debug('fpath: {}'.format(fpath))
+ print('fpath: {}'.format(fpath))
+
+ # files.append({
+ # 'title': 'Blurred image',
+ # 'fn': render_uri + uuid_name + '_blur.jpg'
+ # })
+
+  meta['step'] += 1
+  meta['message'] = 'Saving blurred image'
+ meta['data']['blur_fn'] = {
+ 'title': 'Blurred image',
+ 'url': os.path.join('/user_content/', fn)
+ }
+ self.update_state(state='PROCESSING', meta=meta)
+ time.sleep(3)
+
+ if os.path.exists(fpath):
+ os.remove(fpath)
+
+ meta['step'] += 1
+ meta['message'] = 'Securely deleting user content'
+ self.update_state(state='PROCESSING', meta=meta)
+ time.sleep(2)
+
+ log.debug('done!!')
+
+ meta['step'] = meta['total']
+ meta['state'] = 'complete'
+ return meta
+
diff --git a/megapixels/app/server/tasks/demo.py b/megapixels/app/server/tasks/demo.py
new file mode 100644
index 00000000..c27b08b5
--- /dev/null
+++ b/megapixels/app/server/tasks/demo.py
@@ -0,0 +1,262 @@
+
+import app.settings.app_cfg as cfg
+from app.server.tasks import celery
+
+from celery.utils.log import get_task_logger
+log = get_task_logger(__name__)
+
+opt_size = (256, 256,)
+
+@celery.task(bind=True)
+def demo_task(self, uuid_name, fn):
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import numpy as np
+ import cv2 as cv
+ import dlib
+ from PIL import Image
+ import matplotlib.pyplot as plt
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.utils import plot_utils
+ from app.processors import face_detector, face_landmarks, face_age_gender, face_beauty
+ # , face_emotion
+ from app.models.data_store import DataStore
+
+ # TODO add selective testing
+ opt_gpu = -1
+ opt_run_pose = True
+ opt_run_2d_68 = True
+ opt_run_3d_68 = True
+
+ opt_gif_size = (256, 256,)
+ opt_gif_frames = 15
+
+ meta = {
+ 'step': 0,
+ 'total': 10,
+ 'message': 'Starting',
+ 'uuid': uuid_name,
+ 'data': { 'statistics': {} },
+ }
+ paths = []
+
+ def step(msg, step=1):
+ meta['message'] = msg
+ meta['step'] += step
+ log.debug('> {}'.format(msg))
+ self.update_state(state='PROCESSING', meta=meta)
+
+ def save_image(key, title, data):
+ fn = '{}_{}.jpg'.format(uuid_name, key)
+ fpath = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn)
+ paths.append(fpath)
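+    # NB: data is assumed to be a BGR numpy array (OpenCV convention)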
+ cv.imwrite(fpath, data)
+
+ meta['data'][key] = {
+ 'title': title,
+ 'url': os.path.join('/user_content/', fn),
+ }
+
+ step('Loading image')
+
+ # -------------------------------------------------
+ # init here
+
+ # load image
+ im = cv.imread(fn)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+
+ # ----------------------------------------------------------------------------
+ # detect face
+
+ face_detector_instance = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ step('Detecting face')
+ st = time.time()
+  bboxes = face_detector_instance.detect(im_resized, largest=True)
+  if not bboxes:
+    log.error('No face detected')
+    meta['error'] = 'No face detected'
+    self.update_state(state='FAILURE', meta=meta)
+    return meta
+  log.info(f'Detected face in {(time.time() - st):.2f}s')
+
+  bbox = bboxes[0]
+  dim = im_resized.shape[:2][::-1]
+  bbox_dim = bbox.to_dim(dim)
+
+
+ # ----------------------------------------------------------------------------
+ # detect 3D landmarks
+
+ step('Generating 3D Landmarks')
+ log.info('loading 3D landmark generator files...')
+ landmark_detector_3d_68 = face_landmarks.FaceAlignment3D_68(gpu=opt_gpu) # -1 for CPU
+ log.info('generating 3D landmarks...')
+ st = time.time()
+ points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_dim.to_xyxy())
+ log.info(f'generated 3D landmarks in {(time.time() - st):.2f}s')
+ log.info('')
+
+ # draw 3d landmarks
+ im_landmarks_3d_68 = im_resized.copy()
+ draw_utils.draw_landmarks3D(im_landmarks_3d_68, points_3d_68)
+ draw_utils.draw_bbox(im_landmarks_3d_68, bbox_dim)
+
+ save_image('landmarks_3d_68', '3D Landmarks', im_landmarks_3d_68)
+
+ # ----------------------------------------------------------------------------
+ # generate 3D GIF animation
+
+ step('Generating GIF Animation')
+ log.info('generating 3D animation...')
+
+ fn = '{}_{}.gif'.format(uuid_name, '3d')
+ fp_out = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn)
+ paths.append(fp_out)
+
+ st = time.time()
+ plot_utils.generate_3d_landmark_anim(np.array(points_3d_68), fp_out,
+ size=opt_gif_size, num_frames=opt_gif_frames)
+ log.info(f'Generated animation in {(time.time() - st):.2f}s')
+ log.info(f'Saved to: {fp_out}')
+ log.info('')
+
+  meta['data']['points_3d_68'] = {
+    'title': '3D Animated GIF',
+    'url': os.path.join('/user_content/', fn),
+  }
+
+ # ----------------------------------------------------------------------------
+ # generate 68 point landmarks using dlib
+
+ step('Generating 2D 68PT landmarks')
+ log.info('initializing face landmarks 68 dlib...')
+ landmark_detector_2d_68 = face_landmarks.Dlib2D_68()
+ log.info('generating 2D 68PT landmarks...')
+ st = time.time()
+ points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim)
+ log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s')
+ log.info('')
+
+ # draw 2d landmarks
+ im_landmarks_2d_68 = im_resized.copy()
+ draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68)
+ draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim)
+ save_image('landmarks_2d_68', '2D Landmarks', im_landmarks_2d_68)
+
+ # ----------------------------------------------------------------------------
+ # generate pose from 68 point 2D landmarks
+
+ if opt_run_pose:
+ step('Generating pose')
+ log.info('initialize pose...')
+ from app.processors import face_pose
+ pose_detector = face_pose.FacePoseDLIB()
+ log.info('generating pose...')
+ st = time.time()
+ pose_data = pose_detector.pose(points_2d_68, dim)
+ log.info(f'generated pose {(time.time() - st):.2f}s')
+ log.info('')
+
+ im_pose = im_resized.copy()
+ draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points'])
+ draw_utils.draw_degrees(im_pose, pose_data)
+ save_image('pose', 'Pose', im_pose)
+
+ # ----------------------------------------------------------------------------
+ # age
+
+ # real
+ step('Running age predictor')
+ age_real_predictor = face_age_gender.FaceAgeReal()
+ st = time.time()
+ age_real = age_real_predictor.predict(im_resized, bbox_dim)
+  log.info(f'age real took: {(time.time()-st):.5f}s')
+ meta['data']['statistics']['age_real'] = f'{(age_real):.2f}'
+
+ # apparent
+ age_apparent_predictor = face_age_gender.FaceAgeApparent()
+ st = time.time()
+ age_apparent = age_apparent_predictor.predict(im_resized, bbox_dim)
+  log.info(f'age apparent took: {(time.time()-st):.5f}s')
+ meta['data']['statistics']['age_apparent'] = f'{(age_apparent):.2f}'
+
+ # gender
+ step('Running gender predictor')
+ gender_predictor = face_age_gender.FaceGender()
+ st = time.time()
+ gender = gender_predictor.predict(im_resized, bbox_dim)
+  log.info(f'gender took: {(time.time()-st):.5f}s')
+ meta['data']['statistics']['gender'] = f"M: {gender['m']:.2f}, F: {gender['f']:.2f}"
+
+ # # ----------------------------------------------------------------------------
+ # # emotion
+
+ # emotion_predictor = face_emotion.FaceEmotion(gpu=opt_gpu)
+ # emotion_score = emotion_predictor.emotion(im_resized, bbox_dim)
+ # log.info(f'emotion score: {(100*emotion_score):.2f}')
+
+ # im_emotion = im_resized.copy()
+ # draw_utils.draw_bbox(im_emotion, bbox_dim)
+ # txt = f'emotion score: {(100*emotion_score):.2f}'
+ # draw_utils.draw_text(im_emotion, bbox_dim.pt_tl, txt)
+ # save_image('emotion', 'Emotion', im_emotion)
+
+
+ # ----------------------------------------------------------------------------
+ # beauty
+
+ # TODO fix Keras CPU/GPU device selection issue
+ # NB: GPU visibility issues with dlib/keras
+ # Wrap this with cuda toggle and run before init dlib GPU
+
+ step('Running beauty predictor')
+ device_cur = os.getenv('CUDA_VISIBLE_DEVICES', '')
+ os.environ['CUDA_VISIBLE_DEVICES'] = ''
+ beauty_predictor = face_beauty.FaceBeauty()
+ os.environ['CUDA_VISIBLE_DEVICES'] = device_cur
+
+ beauty_score = beauty_predictor.beauty(im_resized, bbox_dim)
+ log.info(f'beauty score: {(100*beauty_score):.2f}')
+
+ # # draw 2d landmarks
+ # im_beauty = im_resized.copy()
+ # draw_utils.draw_bbox(im_beauty, bbox_dim)
+ # txt = f'Beauty score: {(100*beauty_score):.2f}'
+ # draw_utils.draw_text(im_beauty, bbox_dim.pt_tl, txt)
+ # save_image('beauty', 'Beauty', im_beauty)
+ meta['data']['statistics']['beauty'] = f'{(100*beauty_score):.2f}'
+
+ step('Done')
+
+ # # 3DDFA
+ # self.log.debug('Add depth')
+ # self.log.debug('Add pncc')
+
+ # # TODO
+ # self.log.debug('Add 3D face model')
+ # self.log.debug('Add face texture flat')
+ # self.log.debug('Add ethnicity')
+
+ log.debug('done!!')
+
+ time.sleep(3)
+ for path in paths:
+ if os.path.exists(path):
+ os.remove(path)
+
+ meta['step'] = meta['total']
+ meta['state'] = 'SUCCESS'
+ return meta
diff --git a/megapixels/app/server/tasks/sleep.py b/megapixels/app/server/tasks/sleep.py
new file mode 100644
index 00000000..fa40b0e9
--- /dev/null
+++ b/megapixels/app/server/tasks/sleep.py
@@ -0,0 +1,38 @@
+import time
+
+# from .. import basemodels
+# celery = basemodels.celery
+
+from celery.utils.log import get_task_logger
+celery_logger = get_task_logger(__name__)
+
+from app.server.tasks import celery
+
+@celery.task(bind=True)
+def sleep_task(self, uuid_name):
+    celery_logger.debug('sleep_task {}'.format(uuid_name))
+ msgs = [
+ {'msg':'Uploaded OK','time':.1},
+ {'msg':'Segmenting Image...','time':2},
+ {'msg':'Found: Person, Horse','time':1},
+ {'msg':'Creating Pix2Pix','time':2}
+ ]
+ for i,m in enumerate(msgs):
+ percent = int(float(i)/float(len(msgs))*100.0)
+ self.update_state(
+ state = 'processing',
+ meta = {
+ 'percent': percent,
+ 'message': m['msg'],
+ 'uuid': uuid_name
+ })
+ celery_logger.debug(m['msg'])
+ time.sleep(m['time'])
+
+ return {
+ 'percent': 100,
+ 'state': 'complete',
+ 'uuid': uuid_name
+ }
diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py
index fde9fed7..14e2493c 100644
--- a/megapixels/app/settings/app_cfg.py
+++ b/megapixels/app/settings/app_cfg.py
@@ -150,3 +150,10 @@ S3_DATASETS_PATH = "v1" # datasets is already in the filename
DIR_SITE_PUBLIC = "../site/public"
DIR_SITE_CONTENT = "../site/content"
DIR_SITE_TEMPLATES = "../site/templates"
+DIR_SITE_USER_CONTENT = "../site/public/user_content"
+
+# -----------------------------------------------------------------------------
+# Celery
+# -----------------------------------------------------------------------------
+CELERY_BROKER_URL = 'redis://localhost:6379/0'
+CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
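+
+# NB: assumes a Redis instance on localhost:6379 (e.g. started with
+# `redis-server` or `docker run -p 6379:6379 redis`); adjust for deployment.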
diff --git a/megapixels/app/utils/im_utils.py b/megapixels/app/utils/im_utils.py
index e882c67f..d36c1c32 100644
--- a/megapixels/app/utils/im_utils.py
+++ b/megapixels/app/utils/im_utils.py
@@ -19,7 +19,19 @@ from torch.autograd import Variable
from sklearn.metrics.pairwise import cosine_similarity
import datetime
+def ensure_pil(im):
+  """Ensure image is a Pillow image"""
+  if isinstance(im, Image.Image):
+    return im
+  return Image.fromarray(im.astype('uint8'), 'RGB')
+
+def ensure_np(im):
+  """Ensure image is a numpy array"""
+  if isinstance(im, np.ndarray):
+    return im
+  return np.asarray(im, np.uint8)
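+
+# e.g. ensure_np(Image.open('face.jpg')) -> np.ndarray,
+#      ensure_pil(np_array) -> PIL.Image (hypothetical inputs)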
def num_channels(im):
'''Returns number of channels in numpy.ndarray image'''