path: root/megapixels/app/server
author     adamhrv <adam@ahprojects.com>    2019-01-14 22:25:25 +0100
committer  adamhrv <adam@ahprojects.com>    2019-01-14 22:25:25 +0100
commit     df9d364e3664f45c65cac5990d3d742b990217fa (patch)
tree       8842d844a5ea8e6c87599b8683009cba23262713 /megapixels/app/server
parent     2fedd95fcee3f048c5f24333ffdb9bb4e13eafe2 (diff)
parent     3b2f0dc6d969fa323fe8775b4269e17c60192431 (diff)
Merge branch 'master' of github.com:adamhrv/megapixels_dev
Diffstat (limited to 'megapixels/app/server')
-rw-r--r--  megapixels/app/server/api.py                57
-rw-r--r--  megapixels/app/server/api_task.py          124
-rw-r--r--  megapixels/app/server/create.py             20
-rw-r--r--  megapixels/app/server/tasks/__init__.py     47
-rw-r--r--  megapixels/app/server/tasks/blur.py         81
-rw-r--r--  megapixels/app/server/tasks/demo.py        262
-rw-r--r--  megapixels/app/server/tasks/sleep.py        38
7 files changed, 621 insertions, 8 deletions
diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py
index 3683d5fd..b3bce9bc 100644
--- a/megapixels/app/server/api.py
+++ b/megapixels/app/server/api.py
@@ -3,6 +3,7 @@ import re
import time
import dlib
import numpy as np
+import operator
from flask import Blueprint, request, jsonify
from PIL import Image # todo: try to remove PIL dependency
@@ -27,7 +28,6 @@ def index():
"""List the datasets and their fields"""
return jsonify({ 'datasets': list_datasets() })
-
@api.route('/dataset/<dataset_name>')
def show(dataset_name):
"""Show the data that a dataset will return"""
@@ -37,7 +37,6 @@ def show(dataset_name):
else:
return jsonify({ 'status': 404 })
-
@api.route('/dataset/<dataset_name>/face', methods=['POST'])
def upload(dataset_name):
"""Query an image against FAISS and return the matching identities"""
@@ -108,9 +107,10 @@ def upload(dataset_name):
for _d, _i in zip(distances, indexes):
if _d <= THRESHOLD:
dists.append(round(float(_d), 2))
- ids.append(_i+1)
+ ids.append(_i)
- results = [ dataset.get_identity(int(_i)) for _i in ids ]
+ identities = [ dataset.get_identity(int(_i)) for _i in ids ]
+ identities = list(filter(None, identities))
# print(distances)
# print(ids)
@@ -127,7 +127,7 @@ def upload(dataset_name):
# print(results)
return jsonify({
'query': query,
- 'results': results,
+ 'results': identities,
'distances': dists,
})
@@ -139,15 +139,56 @@ def name_lookup(dataset_name):
dataset = get_dataset(dataset_name)
q = request.args.get('q')
- # print(q)
+ q = re.sub('[^a-zA-Z. ]+', '*', q or '')
+ terms = q.split(' ')
query = {
'q': q,
'timing': time.time() - start,
}
- results = dataset.search_name(q + '%') if q else None
+
+ if not q.strip():
+ return jsonify({ 'query': query, 'results': [] })
+
+ lookup = {}
+ results_lookup = {}
+
+ names = dataset.search_name(q + '%')
+ for name in names:
+ if name.id in lookup:
+ print(name.fullname)
+ lookup[name.id] += 4
+ else:
+ print(name.fullname)
+ lookup[name.id] = 4
+ results_lookup[name.id] = name
- # print(results)
+ for i, term in enumerate(terms[0:5]):
+ search_term = '%' + term + '%'
+ names = dataset.search_name(search_term) if len(term) > 0 else []
+ descriptions = dataset.search_description(search_term) if len(term) > 0 else []
+ for name in names:
+ if name.id in lookup:
+ print(name.fullname)
+ lookup[name.id] += 2
+ else:
+ print(name.fullname)
+ lookup[name.id] = 2
+ results_lookup[name.id] = name
+ for name in descriptions:
+ if name.id in lookup:
+ print(name.fullname)
+ lookup[name.id] += 1
+ else:
+ print(name.fullname)
+ lookup[name.id] = 1
+ results_lookup[name.id] = name
+
+ sorted_names = sorted(lookup.items(), key=operator.itemgetter(1), reverse=True)[0:10]
+ top_names = [results_lookup[item[0]] for item in sorted_names]
+ results = dataset.get_file_records_for_identities(top_names)
+
+ print(results)
return jsonify({
'query': query,
'results': results,
diff --git a/megapixels/app/server/api_task.py b/megapixels/app/server/api_task.py
new file mode 100644
index 00000000..57ae9f7d
--- /dev/null
+++ b/megapixels/app/server/api_task.py
@@ -0,0 +1,124 @@
+import os
+import re
+import uuid
+import time
+import dlib
+import tempfile
+import simplejson as json
+import numpy as np
+from flask import Blueprint, request, jsonify
+from PIL import Image, ImageOps # todo: try to remove PIL dependency
+
+from celery.result import AsyncResult
+from app.server.tasks import celery
+from app.server.tasks import task_lookup, list_active_tasks
+# from app.models.sql_factory import load_sql_datasets, list_datasets, get_dataset, get_table
+
+api_task = Blueprint('task', __name__)
+
+@api_task.route('/')
+def index():
+ """List active tasks"""
+ return jsonify(list_active_tasks())
+
+@api_task.route('/status/<task_name>/<task_id>')
+def task_status(task_name, task_id):
+ """Return celery image processing status"""
+ if task_name in task_lookup:
+ task = task_lookup[task_name]['task'].AsyncResult(task_id)
+ # task = AsyncResult(task_id, app=celery)
+
+ if task_name not in task_lookup or task.info is None:
+ return jsonify({
+ 'state': 'error',
+ 'percent': 100,
+ 'message': 'Unknown task',
+ })
+ # app.logger.info('task state: {}'.format(task.state))
+ if task.state == 'PENDING':
+ response = {
+ 'state': task.state,
+ 'percent': 0,
+ 'message': 'Pending...',
+ 'data': task.info,
+ }
+ elif task.state != 'FAILURE':
+ response = {
+ 'state': task.state,
+ 'percent': task.info.get('percent', 0),
+ 'uuid': task.info.get('uuid', 0),
+ 'message': task.info.get('message', ''),
+ 'data': task.info,
+ }
+ if 'result' in task.info:
+ response['result'] = task.info['result']
+ else:
+ # something went wrong in the background job
+ response = {
+ 'state': task.state,
+ 'percent': 100,
+ 'message': str(task.info), # this is the exception raised
+ 'data': task.info,
+ }
+ return jsonify(response)
+
+@api_task.route('/upload/sleep', methods=['GET', 'POST'])
+def sleep_test():
+ """
+ Test the Celery system using a task that sleeps.
+ """
+ async_task = task_lookup['sleep']['task'].apply_async(args=['sleep_test'])
+ task_url = '/task/status/{}/{}'.format('sleep', async_task.id)
+ return jsonify({
+ 'result': True,
+ 'task_url': task_url,
+ })
+
+@api_task.route('/upload/blur', methods=['POST'])
+def upload():
+ return process('blur')
+
+@api_task.route('/upload/demo', methods=['POST'])
+def demo():
+ return process('demo')
+
+def process(style):
+ """
+ Process an image in a particular style
+ """
+ print('style: {}'.format(style))
+ if style in task_lookup:
+ task = task_lookup[style]['task']
+ print('task', task)
+ else:
+ return jsonify({
+ 'result': False,
+ 'error': 'Unknown task',
+ })
+
+ print('get file...')
+ file = request.files['query_img']
+
+ uuid_str = str(uuid.uuid4())
+
+ print('[+] style: {}'.format(style))
+ print('[+] uuid_name: {}'.format(uuid_str))
+
+ im = Image.open(file.stream).convert('RGB')
+ im = ImageOps.fit(im, (256, 256,))
+
+ tmpfile = tempfile.NamedTemporaryFile(delete=False)
+
+ # Save image to disk
+ print('[+] Save image to temporary file')
+ im.save(tmpfile, 'JPEG', quality=80)
+
+ print('[+] Start celery')
+ async_task = task.apply_async(args=[uuid_str, tmpfile.name])
+ task_url = '/task/status/{}/{}'.format(style, async_task.id)
+
+ return jsonify({
+ 'result': True,
+ 'taskURL': task_url,
+ 'uuid': uuid_str
+ })
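
For orientation, a minimal client-side sketch of the upload-and-poll flow exposed by api_task.py (the host address, test image name, and polling interval below are assumptions for illustration, not part of this commit):

    # Hypothetical client for the /task endpoints above; not part of this commit.
    import time
    import requests

    BASE = 'http://localhost:5000'  # assumed dev server address

    with open('face.jpg', 'rb') as f:  # placeholder test image
        resp = requests.post(BASE + '/task/upload/demo', files={'query_img': f}).json()

    status_url = BASE + resp['taskURL']  # e.g. /task/status/demo/<task_id>
    while True:
        status = requests.get(status_url).json()
        print(status.get('state'), status.get('percent'), status.get('message'))
        if status.get('state') in ('SUCCESS', 'FAILURE', 'error'):
            break
        time.sleep(1)

The client only ever sees the JSON built in task_status(); the Celery meta dict itself stays server-side.
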
diff --git a/megapixels/app/server/create.py b/megapixels/app/server/create.py
index 4b1333b9..a1ce56df 100644
--- a/megapixels/app/server/create.py
+++ b/megapixels/app/server/create.py
@@ -1,8 +1,25 @@
+import logging
+import logging.handlers
+
+logger = logging.getLogger("")
+logger.setLevel(logging.DEBUG)
+handler = logging.handlers.RotatingFileHandler("flask.log",
+ maxBytes=3000000, backupCount=2)
+formatter = logging.Formatter(
+ '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s')
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+logging.getLogger().addHandler(logging.StreamHandler())
+
+logging.debug("starting app")
+
from flask import Flask, Blueprint, jsonify, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from app.models.sql_factory import connection_url, load_sql_datasets
+from app.settings import app_cfg as cfg
from app.server.api import api
+from app.server.api_task import api_task
db = SQLAlchemy()
@@ -13,11 +30,14 @@ def create_app(script_info=None):
app = Flask(__name__, static_folder='static', static_url_path='')
app.config['SQLALCHEMY_DATABASE_URI'] = connection_url
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
+ app.config['CELERY_BROKER_URL'] = cfg.CELERY_BROKER_URL
+ app.config['CELERY_RESULT_BACKEND'] = cfg.CELERY_RESULT_BACKEND
db.init_app(app)
datasets = load_sql_datasets(replace=False, base_model=db.Model)
app.register_blueprint(api, url_prefix='/api')
+ app.register_blueprint(api_task, url_prefix='/task')
app.add_url_rule('/<path:file_relative_path_to_root>', 'serve_page', serve_page, methods=['GET'])
@app.route('/', methods=['GET'])
diff --git a/megapixels/app/server/tasks/__init__.py b/megapixels/app/server/tasks/__init__.py
new file mode 100644
index 00000000..c0db0be5
--- /dev/null
+++ b/megapixels/app/server/tasks/__init__.py
@@ -0,0 +1,47 @@
+import simplejson as json
+from app.settings import app_cfg as cfg
+from celery import Celery
+
+celery = Celery(__name__, backend=cfg.CELERY_RESULT_BACKEND, broker=cfg.CELERY_BROKER_URL)
+
+from app.server.tasks.sleep import sleep_task
+from app.server.tasks.blur import blur_task
+from app.server.tasks.demo import demo_task
+
+def list_active_tasks():
+ dropdown = {}
+ for k,v in task_lookup.items():
+ if 'active' not in v or v['active'] is not False:
+ is_default = 'default' in v and v['default'] is True
+ task = {
+ 'name': k,
+ 'title': v['title'],
+ 'selected': is_default,
+ }
+ dropdown[k] = task
+ return dropdown
+
+###################################################################
+# Add all valid tasks to this lookup.
+# Set 'active': False to disable a task
+# Set 'default': True to define the default task
+
+task_lookup = {
+ 'sleep': {
+ 'title': 'Sleep Test',
+ 'task': sleep_task,
+ 'active': True,
+ 'default': True,
+ },
+ 'blur': {
+ 'title': 'Blur',
+ 'task': blur_task,
+ 'active': True,
+ },
+ 'demo': {
+ 'title': 'Facial processing pipeline',
+ 'task': demo_task,
+ 'active': True,
+ 'default': True,
+ }
+}
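
Registering a further task follows the same convention; a hypothetical entry (the 'sharpen' task below does not exist in this commit) would look like:

    # Hypothetical example only -- sharpen_task is not part of this commit.
    from app.server.tasks.sharpen import sharpen_task

    task_lookup['sharpen'] = {
        'title': 'Sharpen',
        'task': sharpen_task,
        'active': True,     # set False to hide it from list_active_tasks()
        # 'default': True,  # at most one task should be marked default
    }
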
diff --git a/megapixels/app/server/tasks/blur.py b/megapixels/app/server/tasks/blur.py
new file mode 100644
index 00000000..74798cee
--- /dev/null
+++ b/megapixels/app/server/tasks/blur.py
@@ -0,0 +1,81 @@
+import os
+import sys
+import time
+import datetime
+import json
+from PIL import Image
+import cv2 as cv
+import numpy as np
+from app.utils.im_utils import ensure_np, ensure_pil
+from flask import current_app as app
+
+import app.settings.app_cfg as cfg
+
+from app.server.tasks import celery
+
+from celery.utils.log import get_task_logger
+log = get_task_logger(__name__)
+import imutils
+
+@celery.task(bind=True)
+def blur_task(self, uuid_name, fn):
+ """Process image and update during"""
+ log.debug('process_image_task, uuid: {}'.format(uuid_name))
+ log.debug('fn: {}'.format(fn))
+
+ files = []
+
+ meta = {
+ 'step': 0,
+ 'total': 3,
+ 'message': 'Starting',
+ 'uuid': uuid_name,
+ 'data': {},
+ }
+ self.update_state(state='PROCESSING', meta=meta)
+
+ im = Image.open(fn).convert('RGB')
+ os.remove(fn)
+
+ meta['step'] += 1
+ meta['message'] = 'Applying blur'
+ self.update_state(state='PROCESSING', meta=meta)
+
+ im_np = ensure_np(im)
+ im_blur = cv.blur(im_np, (5, 5))
+ im_blur_pil = ensure_pil(im_blur)
+
+ fn = uuid_name + '_blur.jpg'
+ fpath = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn)
+ im_blur_pil.save(fpath, 'JPEG', quality=80)
+ log.debug('fpath: {}'.format(fpath))
+ print('fpath: {}'.format(fpath))
+
+ # files.append({
+ # 'title': 'Blurred image',
+ # 'fn': render_uri + uuid_name + '_blur.jpg'
+ # })
+
+ meta['step'] += 1
+ meta['message'] = 'Applying blur'
+ meta['data']['blur_fn'] = {
+ 'title': 'Blurred image',
+ 'url': os.path.join('/user_content/', fn)
+ }
+ self.update_state(state='PROCESSING', meta=meta)
+ time.sleep(3)
+
+ if os.path.exists(fpath):
+ os.remove(fpath)
+
+ meta['step'] += 1
+ meta['message'] = 'Securely deleting user content'
+ self.update_state(state='PROCESSING', meta=meta)
+ time.sleep(2)
+
+ log.debug('done!!')
+
+ meta['step'] = meta['total']
+ meta['state'] = 'complete'
+ return meta
+
diff --git a/megapixels/app/server/tasks/demo.py b/megapixels/app/server/tasks/demo.py
new file mode 100644
index 00000000..c27b08b5
--- /dev/null
+++ b/megapixels/app/server/tasks/demo.py
@@ -0,0 +1,262 @@
+
+import app.settings.app_cfg as cfg
+from app.server.tasks import celery
+
+from celery.utils.log import get_task_logger
+log = get_task_logger(__name__)
+
+opt_size = (256, 256,)
+
+@celery.task(bind=True)
+def demo_task(self, uuid_name, fn):
+
+ import sys
+ import os
+ from os.path import join
+ from pathlib import Path
+ import time
+
+ import numpy as np
+ import cv2 as cv
+ import dlib
+ from PIL import Image
+ import matplotlib.pyplot as plt
+
+ from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils
+ from app.utils import plot_utils
+ from app.processors import face_detector, face_landmarks, face_age_gender, face_beauty
+ # , face_emotion
+ from app.models.data_store import DataStore
+
+ # TODO add selective testing
+ opt_gpu = -1
+ opt_run_pose = True
+ opt_run_2d_68 = True
+ opt_run_3d_68 = True
+
+ opt_gif_size = (256, 256,)
+ opt_gif_frames = 15
+
+ meta = {
+ 'step': 0,
+ 'total': 10,
+ 'message': 'Starting',
+ 'uuid': uuid_name,
+ 'data': { 'statistics': {} },
+ }
+ paths = []
+
+ def step(msg, step=1):
+ meta['message'] = msg
+ meta['step'] += step
+ log.debug('> {}'.format(msg))
+ self.update_state(state='PROCESSING', meta=meta)
+
+ def save_image(key, title, data):
+ fn = '{}_{}.jpg'.format(uuid_name, key)
+ fpath = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn)
+ paths.append(fpath)
+ cv.imwrite(fpath, data)
+
+ meta['data'][key] = {
+ 'title': title,
+ 'url': os.path.join('/user_content/', fn),
+ }
+
+ step('Loading image')
+ self.update_state(state='PROCESSING', meta=meta)
+
+ # os.path.join('/user_content/', fn)
+
+ # -------------------------------------------------
+ # init here
+
+ # load image
+ im = cv.imread(fn)
+ im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1])
+
+ # ----------------------------------------------------------------------------
+ # detect face
+
+ face_detector_instance = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU
+ step('Detecting face')
+ st = time.time()
+ bboxes = face_detector_instance.detect(im_resized, largest=True)
+ if not bboxes:
+ # guard before indexing, otherwise an empty detection result raises IndexError
+ log.error('No face detected')
+ meta['error'] = 'No face detected'
+ self.update_state(state='FAILURE', meta=meta)
+ return meta
+ log.info(f'Detected face in {(time.time() - st):.2f}s')
+ bbox = bboxes[0]
+ dim = im_resized.shape[:2][::-1]
+ bbox_dim = bbox.to_dim(dim)
+
+
+ # ----------------------------------------------------------------------------
+ # detect 3D landmarks
+
+ step('Generating 3D Landmarks')
+ log.info('loading 3D landmark generator files...')
+ landmark_detector_3d_68 = face_landmarks.FaceAlignment3D_68(gpu=opt_gpu) # -1 for CPU
+ log.info('generating 3D landmarks...')
+ st = time.time()
+ points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_dim.to_xyxy())
+ log.info(f'generated 3D landmarks in {(time.time() - st):.2f}s')
+ log.info('')
+
+ # draw 3d landmarks
+ im_landmarks_3d_68 = im_resized.copy()
+ draw_utils.draw_landmarks3D(im_landmarks_3d_68, points_3d_68)
+ draw_utils.draw_bbox(im_landmarks_3d_68, bbox_dim)
+
+ save_image('landmarks_3d_68', '3D Landmarks', im_landmarks_3d_68)
+
+ # ----------------------------------------------------------------------------
+ # generate 3D GIF animation
+
+ step('Generating GIF Animation')
+ log.info('generating 3D animation...')
+
+ fn = '{}_{}.gif'.format(uuid_name, '3d')
+ fp_out = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn)
+ paths.append(fp_out)
+
+ st = time.time()
+ plot_utils.generate_3d_landmark_anim(np.array(points_3d_68), fp_out,
+ size=opt_gif_size, num_frames=opt_gif_frames)
+ log.info(f'Generated animation in {(time.time() - st):.2f}s')
+ log.info(f'Saved to: {fp_out}')
+ log.info('')
+
+ meta['data']['points_3d_68'] = points_3d_68
+ meta['data']['points_3d_68'] = {
+ 'title': '3D Animated GIF',
+ 'url': os.path.join('/user_content/', fn),
+ }
+
+ # ----------------------------------------------------------------------------
+ # generate 68 point landmarks using dlib
+
+ step('Generating 2D 68PT landmarks')
+ log.info('initializing face landmarks 68 dlib...')
+ from app.processors import face_landmarks
+ landmark_detector_2d_68 = face_landmarks.Dlib2D_68()
+ log.info('generating 2D 68PT landmarks...')
+ st = time.time()
+ points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim)
+ log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s')
+ log.info('')
+
+ # draw 2d landmarks
+ im_landmarks_2d_68 = im_resized.copy()
+ draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68)
+ draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim)
+ save_image('landmarks_2d_68', '2D Landmarks', im_landmarks_2d_68)
+
+ # ----------------------------------------------------------------------------
+ # generate pose from 68 point 2D landmarks
+
+ if opt_run_pose:
+ step('Generating pose')
+ log.info('initialize pose...')
+ from app.processors import face_pose
+ pose_detector = face_pose.FacePoseDLIB()
+ log.info('generating pose...')
+ st = time.time()
+ pose_data = pose_detector.pose(points_2d_68, dim)
+ log.info(f'generated pose {(time.time() - st):.2f}s')
+ log.info('')
+
+ im_pose = im_resized.copy()
+ draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points'])
+ draw_utils.draw_degrees(im_pose, pose_data)
+ save_image('pose', 'Pose', im_pose)
+
+ # ----------------------------------------------------------------------------
+ # age
+
+ # real
+ step('Running age predictor')
+ age_real_predictor = face_age_gender.FaceAgeReal()
+ st = time.time()
+ age_real = age_real_predictor.predict(im_resized, bbox_dim)
+ log.info(f'age real took: {(time.time()-st):.5f}s')
+ meta['data']['statistics']['age_real'] = f'{(age_real):.2f}'
+
+ # apparent
+ age_apparent_predictor = face_age_gender.FaceAgeApparent()
+ st = time.time()
+ age_apparent = age_apparent_predictor.predict(im_resized, bbox_dim)
+ log.info(f'age apparent took: {(time.time()-st):.5f}s')
+ meta['data']['statistics']['age_apparent'] = f'{(age_apparent):.2f}'
+
+ # gender
+ step('Running gender predictor')
+ gender_predictor = face_age_gender.FaceGender()
+ st = time.time()
+ gender = gender_predictor.predict(im_resized, bbox_dim)
+ log.info(f'gender took: {(time.time()-st):.5f}s')
+ meta['data']['statistics']['gender'] = f"M: {gender['m']:.2f}, F: {gender['f']:.2f}"
+
+ # # ----------------------------------------------------------------------------
+ # # emotion
+
+ # emotion_predictor = face_emotion.FaceEmotion(gpu=opt_gpu)
+ # emotion_score = emotion_predictor.emotion(im_resized, bbox_dim)
+ # log.info(f'emotion score: {(100*emotion_score):.2f}')
+
+ # im_emotion = im_resized.copy()
+ # draw_utils.draw_bbox(im_emotion, bbox_dim)
+ # txt = f'emotion score: {(100*emotion_score):.2f}'
+ # draw_utils.draw_text(im_emotion, bbox_dim.pt_tl, txt)
+ # save_image('emotion', 'Emotion', im_emotion)
+
+
+ # ----------------------------------------------------------------------------
+ # beauty
+
+ # TODO fix Keras CPU/GPU device selection issue
+ # NB: GPU visibility issues with dlib/keras
+ # Wrap this with cuda toggle and run before init dlib GPU
+
+ step('Running beauty predictor')
+ device_cur = os.getenv('CUDA_VISIBLE_DEVICES', '')
+ os.environ['CUDA_VISIBLE_DEVICES'] = ''
+ beauty_predictor = face_beauty.FaceBeauty()
+ os.environ['CUDA_VISIBLE_DEVICES'] = device_cur
+
+ beauty_score = beauty_predictor.beauty(im_resized, bbox_dim)
+ log.info(f'beauty score: {(100*beauty_score):.2f}')
+
+ # # draw 2d landmarks
+ # im_beauty = im_resized.copy()
+ # draw_utils.draw_bbox(im_beauty, bbox_dim)
+ # txt = f'Beauty score: {(100*beauty_score):.2f}'
+ # draw_utils.draw_text(im_beauty, bbox_dim.pt_tl, txt)
+ # save_image('beauty', 'Beauty', im_beauty)
+ meta['data']['statistics']['beauty'] = f'{(100*beauty_score):.2f}'
+
+ step('Done')
+
+ # # 3DDFA
+ # self.log.debug('Add depth')
+ # self.log.debug('Add pncc')
+
+ # # TODO
+ # self.log.debug('Add 3D face model')
+ # self.log.debug('Add face texture flat')
+ # self.log.debug('Add ethnicity')
+
+ log.debug('done!!')
+
+ time.sleep(3)
+ for path in paths:
+ if os.path.exists(path):
+ os.remove(path)
+
+ meta['step'] = meta['total']
+ meta['state'] = 'SUCCESS'
+ return meta
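
For local debugging it can help to run the pipeline eagerly, outside a Celery worker; a rough sketch, assuming a test image exists at the placeholder path:

    # Run demo_task in-process via Celery's apply(); illustrative only.
    import uuid
    from app.server.tasks.demo import demo_task

    result = demo_task.apply(args=[str(uuid.uuid4()), '/tmp/face.jpg'])
    print(result.get()['data']['statistics'])
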
diff --git a/megapixels/app/server/tasks/sleep.py b/megapixels/app/server/tasks/sleep.py
new file mode 100644
index 00000000..fa40b0e9
--- /dev/null
+++ b/megapixels/app/server/tasks/sleep.py
@@ -0,0 +1,38 @@
+import time
+
+# from .. import basemodels
+# celery = basemodels.celery
+
+from celery.utils.log import get_task_logger
+celery_logger = get_task_logger(__name__)
+
+from app.server.tasks import celery
+
+import imutils
+
+@celery.task(bind=True)
+def sleep_task(self, uuid_name):
+ celery_logger.debug('sleep_task, uuid: {}'.format(uuid_name))
+ msgs = [
+ {'msg':'Uploaded OK','time':.1},
+ {'msg':'Segmenting Image...','time':2},
+ {'msg':'Found: Person, Horse','time':1},
+ {'msg':'Creating Pix2Pix','time':2}
+ ]
+ for i,m in enumerate(msgs):
+ percent = int(float(i)/float(len(msgs))*100.0)
+ self.update_state(
+ state = 'processing',
+ meta = {
+ 'percent': percent,
+ 'message': m['msg'],
+ 'uuid': uuid_name
+ })
+ celery_logger.debug(m['msg'])
+ time.sleep(m['time'])
+
+ return {
+ 'percent': 100,
+ 'state': 'complete',
+ 'uuid': uuid_name
+ }