author    adamhrv <adam@ahprojects.com>    2018-12-16 19:38:54 +0100
committer adamhrv <adam@ahprojects.com>    2018-12-16 19:38:54 +0100
commit    23e9fef5dce8b0b15dd94713816b9d7d45f12356 (patch)
tree      3ca9ffe3adce76318450991bfc613073470b604c /megapixels
parent    759027d5fbfd6665082f72a3ceaeef68c2d2142e (diff)
parent    6431d06048791763f3644b3a0457cc9c4f1df6d3 (diff)
Merge branch 'master' of github.com:adamhrv/megapixels_dev
Diffstat (limited to 'megapixels')
 megapixels/app/server/api.py      | 43
 megapixels/app/server/create.py   |  7
 megapixels/app/site/builder.py    | 22
 megapixels/app/site/parser.py     | 60
 megapixels/app/site/s3.py         | 12
 megapixels/cli_flask.py           |  1
 megapixels/commands/site/build.py | 14
 7 files changed, 140 insertions(+), 19 deletions(-)
diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py
index c5e27dd2..cf8241bd 100644
--- a/megapixels/app/server/api.py
+++ b/megapixels/app/server/api.py
@@ -1,9 +1,12 @@
import os
import re
import time
+import dlib
+import numpy as np
from flask import Blueprint, request, jsonify
from PIL import Image # todo: try to remove PIL dependency
+from app.processors import face_recognition
+from app.processors import face_detector
from app.models.sql_factory import list_datasets, get_dataset, get_table
sanitize_re = re.compile(r'[\W]+')
@@ -23,8 +26,10 @@ def show(name):
else:
return jsonify({ 'status': 404 })
-@api.route('/dataset/<dataset>/face', methods=['POST'])
+@api.route('/dataset/<name>/face', methods=['POST'])
def upload(name):
+ start = time.time()
+ dataset = get_dataset(name)
file = request.files['query_img']
fn = file.filename
if fn.endswith('blob'):
@@ -37,11 +42,43 @@ def upload(name):
img = Image.open(file.stream).convert('RGB')
- # vec = db.load_feature_vector_from_file(uploaded_img_path)
- # vec = fe.extract(img)
+ # Face detection (dlib operates on numpy arrays, not PIL images)
+ detector = face_detector.DetectorDLIBHOG()
+ im = np.array(img)
+
+ # get detection as a normalized BBox object
+ bboxes = detector.detect(im, largest=True)
+ bbox = bboxes[0]
+ dim = im.shape[:2][::-1] # numpy shape is (h, w); reversed to (w, h)
+ bbox = bbox.to_dim(dim) # convert back to real pixel dimensions
+
+ # face recognition/vector
+ recognition = face_recognition.RecognitionDLIB(gpu=-1)
+
# print(vec.shape)
# results = db.search(vec, limit=limit)
+ # with the result we have an ID
+ # query the sql dataset for the UUID etc here
+
+ query = {
+ 'timing': time.time() - start,
+ }
+ results = []
+
+ return jsonify({
+ 'query': query,
+ 'results': results,
+ })
+
+@api.route('/dataset/<name>/name', methods=['GET'])
+def name_lookup(name):
+ start = time.time()
+ dataset = get_dataset(name)
+
+ # we have a query from the request query string...
+ # use this to do a like* query on the identities_meta table
+
query = {
'timing': time.time() - start,
}
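Both new endpoints still stub out their lookups. A minimal sketch of how the face search might be completed, assuming a hypothetical `recognition.vec()` descriptor method and a `search()` helper on the dataset object (neither is confirmed by this commit):

    # hypothetical completion of the TODO above -- method names are assumptions
    vec = recognition.vec(im, bbox)          # 128-d dlib face descriptor for the detected face
    matches = dataset.search(vec, limit=10)  # nearest-neighbour search over stored vectors
    results = [
        {'uuid': m['uuid'], 'distance': m['distance']}  # resolve each hit via the sql tables
        for m in matches
    ]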
diff --git a/megapixels/app/server/create.py b/megapixels/app/server/create.py
index c1f41dc4..4b1333b9 100644
--- a/megapixels/app/server/create.py
+++ b/megapixels/app/server/create.py
@@ -7,6 +7,9 @@ from app.server.api import api
db = SQLAlchemy()
def create_app(script_info=None):
+ """
+ application factory pattern for creating the Flask app
+ """
app = Flask(__name__, static_folder='static', static_url_path='')
app.config['SQLALCHEMY_DATABASE_URI'] = connection_url
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
@@ -37,6 +40,10 @@ def create_app(script_info=None):
return app
def serve_page(file_relative_path_to_root):
+ """
+ trying to get this to serve /path/ with /path/index.html,
+ ...but it doesn't actually matter for production...
+ """
if file_relative_path_to_root[-1] == '/':
file_relative_path_to_root += 'index.html'
return send_from_directory("static", file_relative_path_to_root)
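For context, `serve_page` only takes effect if it is registered as a catch-all route; a sketch of the wiring it implies inside `create_app` (this registration is assumed, not shown in the hunk above):

    # assumed wiring for the static catch-all, based on serve_page's signature
    @app.route('/', defaults={'file_relative_path_to_root': 'index.html'})
    @app.route('/<path:file_relative_path_to_root>')
    def static_page(file_relative_path_to_root):
        return serve_page(file_relative_path_to_root)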
diff --git a/megapixels/app/site/builder.py b/megapixels/app/site/builder.py
index 42e25768..ff1a0c83 100644
--- a/megapixels/app/site/builder.py
+++ b/megapixels/app/site/builder.py
@@ -15,6 +15,12 @@ env = Environment(
)
def build_page(fn, research_posts):
+ """
+ build a single page from markdown into the appropriate template
+ - writes it to site/public/
+ - syncs any assets with s3
+ - handles certain index pages...
+ """
metadata, sections = parser.read_metadata(fn)
if metadata is None:
@@ -60,9 +66,10 @@ def build_page(fn, research_posts):
with open(output_fn, "w") as file:
file.write(html)
- print("______")
-
def build_research_index(research_posts):
+ """
+ build the index of research (blog) posts
+ """
metadata, sections = parser.read_metadata('../site/content/research/index.md')
template = env.get_template("page.html")
s3_path = s3.make_s3_path(cfg.S3_SITE_PATH, metadata['path'])
@@ -79,7 +86,18 @@ def build_research_index(research_posts):
file.write(html)
def build_site():
+ """
+ build the site! =^)
+ """
research_posts = parser.read_research_post_index()
for fn in glob.iglob(os.path.join(cfg.DIR_SITE_CONTENT, "**/*.md"), recursive=True):
build_page(fn, research_posts)
build_research_index(research_posts)
+
+def build_file(fn):
+ """
+ build just one page from a filename! =^)
+ """
+ research_posts = parser.read_research_post_index()
+ fn = os.path.join(cfg.DIR_SITE_CONTENT, fn)
+ build_page(fn, research_posts)
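`build_file` resolves its argument against `cfg.DIR_SITE_CONTENT`, so one page can be rebuilt without regenerating the whole tree; for example (the content path is illustrative):

    from app.site.builder import build_file

    # rebuilds the site/public/ output for a single source file
    build_file('research/some-post/index.md')  # hypothetical content path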
diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py
index d78cc402..ecfae0cb 100644
--- a/megapixels/app/site/parser.py
+++ b/megapixels/app/site/parser.py
@@ -11,6 +11,10 @@ renderer = mistune.Renderer(escape=False)
markdown = mistune.Markdown(renderer=renderer)
def fix_images(lines, s3_path):
+ """
+ do our own transformation of the markdown around images to handle wide images etc
+ lines: markdown lines
+ """
real_lines = []
block = "\n\n".join(lines)
for line in block.split("\n"):
@@ -29,6 +33,9 @@ def fix_images(lines, s3_path):
return "\n".join(real_lines)
def format_section(lines, s3_path, type=''):
+ """
+ format a normal markdown section
+ """
if len(lines):
lines = fix_images(lines, s3_path)
if type:
@@ -38,36 +45,57 @@ def format_section(lines, s3_path, type=''):
return ""
def format_metadata(section):
+ """
+ format a metadata section (+ key: value pairs)
+ """
meta = []
for line in section.split('\n'):
key, value = line[2:].split(': ', 1)
meta.append("<div><div class='gray'>{}</div><div>{}</div></div>".format(key, value))
return "<section><div class='meta'>{}</div></section>".format(''.join(meta))
-def format_applet(section):
- payload = section.replace('```', '').strip().split('\n')
+def format_applet(section, s3_path):
+ # strip the surrounding ``` fence and any whitespace around it
+ payload = section.strip().strip('`').strip().split('\n')
applet = {}
if ': ' in payload[0]:
command, opt = payload[0].split(': ')
else:
command = payload[0]
opt = None
+ if command == 'python':
+ return format_section([ section ], s3_path)
+
applet['command'] = command
if opt:
applet['opt'] = opt
- if command == 'load file':
- applet['fields'] = payload[1]
- return "<section><div class='applet' data-payload='{}'></div></section>".format(json.dumps(applet))
+ if command == 'load_file':
+ if opt and not opt.startswith('http'):
+ applet['opt'] = s3_path + opt
+ if len(payload) > 1:
+ applet['fields'] = payload[1:]
+ return "<section class='applet_container'><div class='applet' data-payload='{}'></div></section>".format(json.dumps(applet))
def parse_markdown(sections, s3_path, skip_h1=False):
+ """
+ parse page into sections, preprocess the markdown to handle our modifications
+ """
groups = []
current_group = []
for section in sections:
if skip_h1 and section.startswith('# '):
continue
- elif section.startswith('```'):
+ elif section.strip().startswith('```'):
groups.append(format_section(current_group, s3_path))
- groups.append(format_applet(section))
+ current_group = []
+ current_group.append(section)
+ if section.strip().endswith('```'):
+ groups.append(format_applet("\n\n".join(current_group), s3_path))
+ current_group = []
+ elif section.strip().endswith('```'):
+ current_group.append(section)
+ groups.append(format_applet("\n\n".join(current_group), s3_path))
current_group = []
elif section.startswith('+ '):
groups.append(format_section(current_group, s3_path))
@@ -88,6 +116,9 @@ def parse_markdown(sections, s3_path, skip_h1=False):
return content
def parse_research_index(research_posts):
+ """
+ Generate an index file for the research pages
+ """
content = "<div class='research_index'>"
for post in research_posts:
s3_path = s3.make_s3_path(cfg.S3_SITE_PATH, post['path'])
@@ -105,6 +136,9 @@ def parse_research_index(research_posts):
return content
def read_metadata(fn):
+ """
+ Read a markdown file and extract the metadata
+ """
with open(fn, "r") as file:
data = file.read()
data = data.replace("\n ", "\n")
@@ -128,6 +162,9 @@ default_metadata = {
}
def parse_metadata_section(metadata, section):
+ """
+ parse a metadata key: value pair
+ """
for line in section.split("\n"):
if ': ' not in line:
continue
@@ -135,6 +172,11 @@ def parse_metadata_section(metadata, section):
metadata[key.lower()] = value
def parse_metadata(fn, sections):
+ """
+ parse the metadata headers in a markdown file
+ (everything before the second ---------)
+ also generates appropriate urls for this page :)
+ """
found_meta = False
metadata = {}
valid_sections = []
@@ -172,9 +214,13 @@ def parse_metadata(fn, sections):
metadata['sync'] = metadata['sync'] != 'false'
metadata['author_html'] = '<br>'.join(metadata['authors'].split(','))
+
return metadata, valid_sections
def read_research_post_index():
+ """
+ Generate an index of the research (blog) posts
+ """
posts = []
for fn in sorted(glob.glob('../site/content/research/*/index.md')):
metadata, valid_sections = read_metadata(fn)
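To make the applet handling above concrete: with the new `load_file` spelling and S3 prefixing, a fenced block in the page markdown such as the following (contents illustrative)

    ```
    load_file: assets/results.json
    name
    dataset
    ```

would be emitted roughly as

    <section class='applet_container'><div class='applet' data-payload='{"command": "load_file", "opt": "<s3_path>assets/results.json", "fields": ["name", "dataset"]}'></div></section>

where `<s3_path>` stands for the page's computed S3 prefix.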
diff --git a/megapixels/app/site/s3.py b/megapixels/app/site/s3.py
index 99726a4d..18133078 100644
--- a/megapixels/app/site/s3.py
+++ b/megapixels/app/site/s3.py
@@ -3,13 +3,17 @@ import glob
import boto3
def sync_directory(base_fn, s3_path, metadata):
+ """
+ Synchronize a local assets folder with S3
+ """
+ if not metadata['sync']:
+ return
+
fns = {}
for fn in glob.glob(os.path.join(base_fn, 'assets/*')):
+ # print(fn)
fns[os.path.basename(fn)] = True
- if not metadata['sync']:
- return
-
remote_path = s3_path + metadata['url']
session = boto3.session.Session()
@@ -28,6 +32,7 @@ def sync_directory(base_fn, s3_path, metadata):
if 'Contents' in directory:
for obj in directory['Contents']:
s3_fn = obj['Key']
+ # print(s3_fn)
fn = os.path.basename(s3_fn)
local_fn = os.path.join(base_fn, 'assets', fn)
if fn in fns:
@@ -49,6 +54,7 @@ def sync_directory(base_fn, s3_path, metadata):
for fn in fns:
local_fn = os.path.join(base_fn, 'assets', fn)
s3_fn = os.path.join(remote_path, 'assets', fn)
print("s3 create {}".format(s3_fn))
s3_client.upload_file(
local_fn,
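`sync_directory` now returns before listing any files when a page opts out of syncing. A call-site sketch, presumably how `build_page` invokes it (values come from the page being built):

    # as used from the site builder (arguments illustrative)
    s3_path = s3.make_s3_path(cfg.S3_SITE_PATH, metadata['path'])
    s3.sync_directory(base_fn, s3_path, metadata)  # no-op when metadata['sync'] is false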
diff --git a/megapixels/cli_flask.py b/megapixels/cli_flask.py
index 369bec01..e80526c6 100644
--- a/megapixels/cli_flask.py
+++ b/megapixels/cli_flask.py
@@ -1,5 +1,6 @@
# --------------------------------------------------------
# wrapper for flask CLI API
+# NB: python cli_flask.py run
# --------------------------------------------------------
import click
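The diff only touches the header comment, but the `script_info` parameter on `create_app` points at the standard Flask CLI wrapper; a sketch of what cli_flask.py plausibly contains (assumed, not shown in this commit):

    import click
    from flask.cli import FlaskGroup

    from app.server.create import create_app

    # FlaskGroup exposes `run`, `shell`, etc.; hence `python cli_flask.py run`
    cli = FlaskGroup(create_app=create_app)

    if __name__ == '__main__':
        cli()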
diff --git a/megapixels/commands/site/build.py b/megapixels/commands/site/build.py
index 0a76a9ac..2d344899 100644
--- a/megapixels/commands/site/build.py
+++ b/megapixels/commands/site/build.py
@@ -4,12 +4,18 @@ Build the static site
import click
-from app.site.builder import build_site
+from app.site.builder import build_site, build_file
@click.command()
+@click.option('-i', '--input', 'input_file', required=False,
+ help='File to generate')
@click.pass_context
-def cli(ctx):
+def cli(ctx, input_file):
"""Build the static site
"""
- print('Building the site...')
- build_site()
+ if input_file:
+ print('Building {}'.format(input_file))
+ build_file(input_file)
+ else:
+ print('Building the site...')
+ build_site()
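With the new `-i/--input` option a single page can be rebuilt from the command line; assuming the click group is loaded through a cli_site.py entry point matching the cli_flask.py pattern (the entry-point name and path are illustrative):

    # full site build
    python cli_site.py build

    # rebuild a single page, path relative to the content dir
    python cli_site.py build -i research/some-post/index.md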