| | | |
|---|---|---|
| author | adamhrv <adam@ahprojects.com> | 2019-01-14 22:25:25 +0100 |
| committer | adamhrv <adam@ahprojects.com> | 2019-01-14 22:25:25 +0100 |
| commit | df9d364e3664f45c65cac5990d3d742b990217fa (patch) | |
| tree | 8842d844a5ea8e6c87599b8683009cba23262713 | |
| parent | 2fedd95fcee3f048c5f24333ffdb9bb4e13eafe2 (diff) | |
| parent | 3b2f0dc6d969fa323fe8775b4269e17c60192431 (diff) | |
Merge branch 'master' of github.com:adamhrv/megapixels_dev
40 files changed, 1556 insertions, 291 deletions
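Most of this merge wires a Celery task queue behind the Flask API (`api_task.py`, `app/server/tasks/`) and adds a React `faceAnalysis` applet that uploads a photo to `/task/upload/demo` and then polls `/task/status/<task_name>/<task_id>` until the task reports success or failure. The sketch below is a much-reduced illustration of that request → task → poll loop, not the code in the diff: the route is simplified to a single task id, the Redis URLs are taken from `app_cfg.py`, and the processing steps are stand-ins for the real face pipeline. Running it requires a Redis instance and a Celery worker.

```python
# Minimal sketch of the Celery/Flask task-status pattern added in this merge.
# Assumptions: Redis broker/backend as configured in app_cfg.py; task names,
# route shape, and meta payload are simplified from api_task.py / tasks/demo.py.
import time
from celery import Celery
from flask import Flask, jsonify

celery = Celery(__name__,
                broker='redis://localhost:6379/0',
                backend='redis://localhost:6379/0')
app = Flask(__name__)

@celery.task(bind=True)
def demo_task(self, uuid_name):
    # Progress metadata mirrors what the React applet reads on each poll.
    meta = {'step': 0, 'total': 3, 'message': 'Starting',
            'uuid': uuid_name, 'data': {}}
    for msg in ('Detecting face', 'Generating landmarks', 'Running predictors'):
        meta['step'] += 1
        meta['message'] = msg
        # Each update becomes task.info for the next status request.
        self.update_state(state='PROCESSING', meta=meta)
        time.sleep(1)
    meta['state'] = 'SUCCESS'
    return meta

@app.route('/task/status/<task_id>')
def task_status(task_id):
    # The client polls this endpoint until state is SUCCESS or FAILURE.
    task = demo_task.AsyncResult(task_id)
    info = task.info or {}
    return jsonify({'state': task.state,
                    'message': info.get('message', ''),
                    'data': info})
```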
diff --git a/.eslintrc.js b/.eslintrc.js index 364bcad6..1d5aed90 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -16,7 +16,7 @@ module.exports = { } }, "rules": { - "react/prop-types": 1, + "react/prop-types": 0, "react/jsx-uses-vars": 2, "no-underscore-dangle": 0, "comma-dangle": ["error", "only-multiline"], @@ -37,6 +37,7 @@ module.exports = { "object-curly-newline": 0, "class-methods-use-this": 0, "quotes": "off", + "no-console": "off", }, "env": { "browser": true, @@ -159,4 +159,5 @@ scraper/reports/papers/ .creds site/assets/js/dist/ +site/public/user_content @@ -17,6 +17,7 @@ pip install requests simplejson click pdfminer.six pip install urllib3 flask flask_sqlalchemy mysql-connector pip install pymediainfo tqdm opencv-python imutils pip install scikit-image python-dotenv imagehash scikit-learn colorlog +pip install celery keras tensorflow sudo apt-get install libmysqlclient-dev diff --git a/client/actions.js b/client/actions.js index 2be8229d..7007eb76 100644 --- a/client/actions.js +++ b/client/actions.js @@ -1,7 +1,9 @@ +import * as faceAnalysis from './faceAnalysis/faceAnalysis.actions' import * as faceSearch from './faceSearch/faceSearch.actions' import * as nameSearch from './nameSearch/nameSearch.actions' export { + faceAnalysis, faceSearch, nameSearch, } diff --git a/client/applet.js b/client/applet.js index 80d40657..25291401 100644 --- a/client/applet.js +++ b/client/applet.js @@ -1,12 +1,15 @@ import React, { Component } from 'react' import { Container as FaceSearchContainer } from './faceSearch' +import { Container as FaceAnalysisContainer } from './faceAnalysis' import { Container as NameSearchContainer } from './nameSearch' export default class Applet extends Component { render() { // console.log(this.props) switch (this.props.payload.cmd) { + case 'face_analysis': + return <FaceAnalysisContainer {...this.props} /> case 'face_search': return <FaceSearchContainer {...this.props} /> case 'name_search': diff --git a/client/common/index.js b/client/common/index.js index cfb34b32..cbd3166e 100644 --- a/client/common/index.js +++ b/client/common/index.js @@ -3,6 +3,7 @@ import DetectionBoxes from './detectionBoxes.component' import DetectionList from './detectionList.component' // import Header from './header.component' import Loader from './loader.component' +import UploadImage from './uploadImage.component' import Sidebar from './sidebar.component' import Gate from './gate.component' import Video from './video.component' @@ -12,6 +13,7 @@ import './common.css' export { Sidebar, Loader, + UploadImage, Gate, TableObject, TableArray, diff --git a/client/faceSearch/upload.js b/client/common/upload.helpers.js index f18bdce6..4b38fb09 100644 --- a/client/faceSearch/upload.js +++ b/client/common/upload.helpers.js @@ -1,139 +1,46 @@ -const MAX_SIDE = 300 +import ExifReader from 'exifreader' -function render(img){ - var resized = renderToCanvas(img, { correctOrientation: true }) - var canvas = document.createElement('canvas') // document.querySelector('#user_photo_canvas') - ctx = canvas.getContext('2d') - ctx.fillStyle = 'black' - ctx.fillRect(0, 0, MAX_SIDE, MAX_SIDE) - var x_offset = (MAX_SIDE - resized.width) / 2 - var y_offset = (MAX_SIDE - resized.height) / 2 - ctx.drawImage(resized, x_offset, y_offset) - return canvas -} -function renderToCanvas(img, options) { - if (!img) return - options = options || {} - - // Canvas max size for any side - var maxSize = MAX_SIDE - var canvas = document.createElement('canvas') - var ctx = canvas.getContext('2d') - var initialScale = 
options.scale || 1 - // Scale to needed to constrain canvas to max size - var scale = getScale(img.width * initialScale, img.height * initialScale, maxSize, maxSize, true) - // Still need to apply the user defined scale - scale *= initialScale - var width = canvas.width = Math.round(img.width * scale) - var height = canvas.height = Math.round(img.height * scale) - var correctOrientation = options.correctOrientation - var jpeg = !!img.src.match(/data:image\/jpeg|\.jpeg$|\.jpg$/i) - var hasDataURI = !!img.src.match(/^data:/) - - ctx.save() - - // Can only correct orientation on JPEGs represented as dataURIs - // for the time being - if (correctOrientation && jpeg && hasDataURI) { - applyOrientationCorrection(canvas, ctx, img.src) - } - // Resize image if too large - if (scale !== 1) { - ctx.scale(scale, scale) - } - - ctx.drawImage(img, 0, 0) - ctx.restore() - - return canvas -} +export const MAX_SIDE = 256 -function getScale(width, height, viewportWidth, viewportHeight, fillViewport) { - fillViewport = !!fillViewport - var landscape = (width / height) > (viewportWidth / viewportHeight) - if (landscape) { - if (fillViewport) { - return fitVertical() - } else if (width > viewportWidth) { - return fitHorizontal() - } - } else { - if (fillViewport) { - return fitHorizontal() - } else if (height > viewportHeight) { - return fitVertical() - } - } - return 1 - - function fitHorizontal() { - return viewportWidth / width - } - - function fitVertical() { - return viewportHeight / height +function base64ToUint8Array(string, start, finish) { + start = start || 0 + finish = finish || string.length + // atob that shit + const binary = atob(string) + const buffer = new Uint8Array(binary.length) + for (let i = start; i < finish; i++) { + buffer[i] = binary.charCodeAt(i) } + return buffer } -function applyOrientationCorrection(canvas, ctx, uri) { - var orientation = getOrientation(uri) - // Only apply transform if there is some non-normal orientation - if (orientation && orientation !== 1) { - var transform = orientationToTransform[orientation] - var rotation = transform.rotation - var mirror = transform.mirror - var flipAspect = rotation === 90 || rotation === 270 - if (flipAspect) { - // Fancy schmancy swap algo - canvas.width = canvas.height + canvas.width - canvas.height = canvas.width - canvas.height - canvas.width -= canvas.height - } - if (rotation > 0) { - applyRotation(canvas, ctx, rotation) - } +function getOrientation(uri) { + // Split off the base64 data + const base64String = uri.split(',')[1] + // Read off first 128KB, which is all we need to + // get the EXIF data + const arr = base64ToUint8Array(base64String, 0, 2 ** 17) + try { + const tags = ExifReader.load(arr.buffer) + // console.log(tags) + return tags.Orientation + } catch (err) { + return 1 } } function applyRotation(canvas, ctx, deg) { - var radians = deg * (Math.PI / 180) + const radians = deg * (Math.PI / 180) if (deg === 90) { ctx.translate(canvas.width, 0) } else if (deg === 180) { ctx.translate(canvas.width, canvas.height) - } else if (deg == 270) { + } else if (deg === 270) { ctx.translate(0, canvas.height) } ctx.rotate(radians) } -function getOrientation (uri) { - var exif = new ExifReader - // Split off the base64 data - var base64String = uri.split(',')[1] - // Read off first 128KB, which is all we need to - // get the EXIF data - var arr = base64ToUint8Array(base64String, 0, Math.pow(2, 17)) - try { - exif.load(arr.buffer) - return exif.getTagValue('Orientation') - } catch (err) { - return 1 - } -} - -function 
base64ToUint8Array(string, start, finish) { - var start = start || 0 - var finish = finish || string.length - // atob that shit - var binary = atob(string) - var buffer = new Uint8Array(binary.length) - for (var i = start; i < finish; i++) { - buffer[i] = binary.charCodeAt(i) - } - return buffer -} - /** * Mapping from EXIF orientation values to data * regarding the rotation and mirroring necessary to @@ -141,7 +48,7 @@ function base64ToUint8Array(string, start, finish) { * Derived from: * http://www.daveperrett.com/articles/2012/07/28/exif-orientation-handling-is-a-ghetto/ */ -var orientationToTransform = { +const orientationToTransform = { 1: { rotation: 0, mirror: false }, 2: { rotation: 0, mirror: true }, 3: { rotation: 180, mirror: false }, @@ -152,3 +59,98 @@ var orientationToTransform = { 8: { rotation: 270, mirror: false } } +function applyOrientationCorrection(canvas, ctx, uri) { + const orientation = getOrientation(uri) + // Only apply transform if there is some non-normal orientation + if (orientation && orientation !== 1) { + const transform = orientationToTransform[orientation] + const { rotation } = transform + const flipAspect = rotation === 90 || rotation === 270 + if (flipAspect) { + // Fancy schmancy swap algo + canvas.width = canvas.height + canvas.width + canvas.height = canvas.width - canvas.height + canvas.width -= canvas.height + } + if (rotation > 0) { + applyRotation(canvas, ctx, rotation) + } + } +} + +function getScale(width, height, viewportWidth, viewportHeight, fillViewport) { + function fitHorizontal() { + return viewportWidth / width + } + function fitVertical() { + return viewportHeight / height + } + fillViewport = !!fillViewport + const landscape = (width / height) > (viewportWidth / viewportHeight) + if (landscape) { + if (fillViewport) { + return fitVertical() + } + if (width > viewportWidth) { + return fitHorizontal() + } + } else { + if (fillViewport) { + return fitHorizontal() + } + if (height > viewportHeight) { + return fitVertical() + } + } + return 1 +} + +export function renderToCanvas(img, options) { + if (!img) return null + options = options || {} + + // Canvas max size for any side + const maxSide = MAX_SIDE + const canvas = document.createElement('canvas') + const ctx = canvas.getContext('2d') + const initialScale = options.scale || 1 + // Scale to needed to constrain canvas to max size + let scale = getScale(img.naturalWidth * initialScale, img.naturalHeight * initialScale, maxSide, maxSide, true) + console.log(scale) + // Still need to apply the user defined scale + scale *= initialScale + canvas.width = Math.round(img.naturalWidth * scale) + canvas.height = Math.round(img.naturalHeight * scale) + const { correctOrientation } = options + const jpeg = !!img.src.match(/data:image\/jpeg|\.jpeg$|\.jpg$/i) + const hasDataURI = !!img.src.match(/^data:/) + + ctx.save() + + // Can only correct orientation on JPEGs represented as dataURIs + // for the time being + if (correctOrientation && jpeg && hasDataURI) { + applyOrientationCorrection(canvas, ctx, img.src) + } + // Resize image if too large + if (scale !== 1) { + ctx.scale(scale, scale) + } + + ctx.drawImage(img, 0, 0) + ctx.restore() + + return canvas +} + +export function renderThumbnail(img) { + const resized = renderToCanvas(img, { correctOrientation: true }) + // const canvas = document.createElement('canvas') // document.querySelector('#user_photo_canvas') + // const ctx = canvas.getContext('2d') + // ctx.fillStyle = 'black' + // ctx.fillRect(0, 0, MAX_SIDE, MAX_SIDE) + // const xOffset 
= (MAX_SIDE - resized.width) / 2 + // const yOffset = (MAX_SIDE - resized.height) / 2 + // ctx.drawImage(resized, xOffset, yOffset, resized.width, resized.height) + return resized +} diff --git a/client/common/uploadImage.component.js b/client/common/uploadImage.component.js new file mode 100644 index 00000000..bc88828e --- /dev/null +++ b/client/common/uploadImage.component.js @@ -0,0 +1,47 @@ +import React, { Component } from 'react' + +import { renderThumbnail } from './upload.helpers' + +export default class UploadImageComponent extends Component { + upload(e) { + const files = e.dataTransfer ? e.dataTransfer.files : e.target.files + let i + let file + for (i = 0; i < files.length; i++) { + file = files[i] + if (file && file.type.match('image.*')) break + } + if (!file) return + const fr = new FileReader() + fr.onload = fileReaderEvent => { + fr.onload = null + const img = new Image() + img.onload = () => { + img.onload = null + this.resizeAndUpload(img) + } + img.src = fileReaderEvent.target.result + } + fr.readAsDataURL(files[0]) + } + + resizeAndUpload(img) { + const canvas = renderThumbnail(img) + canvas.toBlob(blob => { + // console.log(blob) + this.props.onUpload(blob) + }, 'image/jpeg', 80) + } + + render() { + return ( + <input + type="file" + name="img" + accept="image/*" + onChange={this.upload.bind(this)} + required + /> + ) + } +} diff --git a/client/faceAnalysis/faceAnalysis.actions.js b/client/faceAnalysis/faceAnalysis.actions.js new file mode 100644 index 00000000..4a6fe6ed --- /dev/null +++ b/client/faceAnalysis/faceAnalysis.actions.js @@ -0,0 +1,91 @@ +// import fetchJsonp from 'fetch-jsonp' +import * as types from '../types' +// import { hashPath } from '../util' +import { store } from '../store' +import { get, post } from '../util' +// import querystring from 'query-string' + +// urls + +const url = { + upload: () => process.env.API_HOST + '/task/upload/demo', +} +export const publicUrl = { +} + +// standard loading events + +const loading = (tag, offset) => ({ + ts: Date.now(), + type: types.faceAnalysis.loading, + tag, + offset +}) +const loaded = (tag, data, offset = 0) => ({ + ts: Date.now(), + type: types.faceAnalysis.loaded, + tag, + data, + offset +}) +const polled = (data, offset = 0) => ({ + ts: Date.now(), + type: types.faceAnalysis.poll, + data, + offset +}) +const error = (tag, err) => ({ + type: types.faceAnalysis.error, + tag, + err +}) + +// search UI functions + +export const updateOptions = opt => dispatch => { + dispatch({ type: types.faceAnalysis.update_options, opt }) +} + +// API functions + +// task polling + +const POLL_DELAY = 500 +let pollTimeout = null + +export const poll = (payload, taskURL) => dispatch => { + clearTimeout(pollTimeout) + // console.log('polling...') + get(taskURL) + .then(data => { + // console.log('poll', data) + dispatch(polled(data)) + // console.log(data.state) + if (data.state === 'COMPLETE' || data.state === 'SUCCESS') { + console.log('complete!') + } else if (data.state === 'ERROR' || data.state === 'FAILURE') { + console.log('errorr!') + dispatch(error(data)) + } else { + pollTimeout = setTimeout(() => poll(payload, taskURL)(dispatch), POLL_DELAY) + } + }) + .catch(err => dispatch(error('result', err))) +} + +export const upload = (payload, file) => dispatch => { + const tag = 'task' + const fd = new FormData() + fd.append('query_img', file) + dispatch(loading(tag)) + post(url.upload(), fd) + .then(data => { + // console.log('loaded!', tag, data) + dispatch(loaded(tag, data)) + const { result, taskURL } = data + if 
(result && taskURL) { + poll(payload, taskURL)(dispatch) + } + }) + .catch(err => dispatch(error(tag, err))) +} diff --git a/client/faceAnalysis/faceAnalysis.container.js b/client/faceAnalysis/faceAnalysis.container.js new file mode 100644 index 00000000..24848455 --- /dev/null +++ b/client/faceAnalysis/faceAnalysis.container.js @@ -0,0 +1,24 @@ +import React, { Component } from 'react' +import { bindActionCreators } from 'redux' +import { connect } from 'react-redux' + +import * as actions from './faceAnalysis.actions' + +import FaceAnalysisQuery from './faceAnalysis.query' +import FaceAnalysisResult from './faceAnalysis.result' + +class FaceAnalysisContainer extends Component { + render() { + const { payload } = this.props + // console.log(payload) + return ( + <div className='analysisContainer'> + <FaceAnalysisQuery payload={payload} /> + <FaceAnalysisResult payload={payload} /> + </div> + ) + } +} + + +export default FaceAnalysisContainer diff --git a/client/faceAnalysis/faceAnalysis.query.js b/client/faceAnalysis/faceAnalysis.query.js new file mode 100644 index 00000000..33dd641f --- /dev/null +++ b/client/faceAnalysis/faceAnalysis.query.js @@ -0,0 +1,86 @@ +import React, { Component } from 'react' +import { bindActionCreators } from 'redux' +import { connect } from 'react-redux' + +import { Loader, UploadImage } from '../common' +import * as actions from './faceAnalysis.actions' + +// function parse_bbox(s) { +// // "BBox: (77,86), (166, 177), width:89, height:91" +// try { +// const [x, y, w, h, width, height] = s.replace(/\D/g, ' ').replace(/\s+/g, ' ').trim().split(' ') +// return { x, y, w, h } +// } +// } + +class FaceAnalysisQuery extends Component { + state = { + image: null + } + + upload(blob) { + if (this.state.image) { + URL.revokeObjectURL(this.state.image) + } + const url = URL.createObjectURL(blob) + this.setState({ image: url }) + this.props.actions.upload(this.props.payload, blob) + } + + componentWillUnmount() { + if (this.state.image) { + URL.revokeObjectURL(this.state.image) + } + } + + render() { + const { result } = this.props + const { image } = this.state + const style = {} + if (image) { + style.backgroundImage = 'url(' + image + ')' + style.backgroundSize = 'cover' + style.opacity = 1 + } + return ( + <div className='query row'> + <div className='uploadContainer'> + <div style={style}> + {image ? null : <img src="/assets/img/icon_camera.svg" />} + <UploadImage onUpload={this.upload.bind(this)} /> + </div> + {result.loading && ( + <div className='loading' style={style}> + <Loader /> + </div> + )} + </div> + <div className='cta'> + <h2>Face Analysis</h2> + <p> + {'Put yourself under the microscope of various facial recognition algorithms. See what can be determined from a photo.'} + </p> + <ol> + <li>Upload a photo of yourself and be judged by the algorithm</li> + <li>{'Your search data is only stored for the duration of this analysis and is immediately cleared '} + {'once you leave this page.'}</li> + </ol> + <p> + Read more about <a href='/about/privacy/'>privacy</a>. 
+ </p> + </div> + </div> + ) + } +} + +const mapStateToProps = state => ({ + result: state.faceAnalysis.result, + options: state.faceAnalysis.options, +}) + +const mapDispatchToProps = dispatch => ({ + actions: bindActionCreators({ ...actions }, dispatch), +}) + +export default connect(mapStateToProps, mapDispatchToProps)(FaceAnalysisQuery) diff --git a/client/faceAnalysis/faceAnalysis.reducer.js b/client/faceAnalysis/faceAnalysis.reducer.js new file mode 100644 index 00000000..d9be7447 --- /dev/null +++ b/client/faceAnalysis/faceAnalysis.reducer.js @@ -0,0 +1,48 @@ +import * as types from '../types' + +const initialState = () => ({ + query: {}, + task: {}, + result: {}, + loading: false, + startTime: 0, + timing: 0, +}) + +export default function faceAnalysisReducer(state = initialState(), action) { + const { startTime } = state + switch (action.type) { + case types.faceAnalysis.loading: + return { + ...state, + startTime: action.ts, + timing: 0, + [action.tag]: { loading: true }, + } + + case types.faceAnalysis.loaded: + return { + ...state, + timing: action.ts - startTime, + [action.tag]: action.data, + } + + case types.faceAnalysis.poll: + return { + ...state, + timing: action.ts - startTime, + result: action.data, + } + + case types.faceAnalysis.error: + console.log('error', action) + return { + ...state, + timing: action.ts - startTime, + [action.tag]: { error: action.err }, + } + + default: + return state + } +} diff --git a/client/faceAnalysis/faceAnalysis.result.js b/client/faceAnalysis/faceAnalysis.result.js new file mode 100644 index 00000000..9d77f258 --- /dev/null +++ b/client/faceAnalysis/faceAnalysis.result.js @@ -0,0 +1,131 @@ +import React, { Component } from 'react' +import { bindActionCreators } from 'redux' +import { connect } from 'react-redux' + +import * as actions from './faceAnalysis.actions' +import { Loader } from '../common' + +const errors = { + bbox: ( + <div> + <h2>No face found</h2> + {"Sorry, we didn't detect a face in that image. "} + {"Please choose an image where the face is large and clear."} + </div> + ), + nomatch: ( + <div> + <h2>{"You're clear"}</h2> + {"No images in this dataset match your face. We show only matches above 70% probability."} + </div> + ), + error: ( + <div> + <h2>{"No matches found"}</h2> + {"Sorry, an error occured."} + </div> + ), + not_an_image: ( + <div> + <h2>{"Not an image"}</h2> + {"Sorry, the file you uploaded was not recognized as an image. 
Please upload a JPG or PNG image and try again."} + </div> + ), +} + +class FaceAnalysisResult extends Component { + render() { + const { result, timing } = this.props + const { data, error, loading, message } = result + let { step, total } = data || {} + // console.log(step, total) + if (loading) { + return ( + <div className='result'> + <div> + <Loader /><br /> + <h2>Uploading...</h2> + </div> + </div> + ) + } + if (error) { + // console.log(error) + let errorMessage = errors[error] || errors.error + return ( + <div className='result'>{errorMessage}</div> + ) + } + // console.log(result) + if (!total) { + return ( + <div className='result'></div> + ) + } + + console.log(data.data) + const results = [ + 'blur_fn', 'points_3d_68', 'landmarks_3d_68', 'landmarks_2d_68', 'pose', + ].map(tag => { + if (tag in data.data) { + const { title, url } = data.data[tag] + return ( + <div key={tag}> + <img src={url} /> + <span>{title}</span> + </div> + ) + } + return null + }).filter(a => a) + + const statisticsLabels = ['Age (Real)', 'Age (Apparent)', 'Gender', 'Beauty score', 'Emotion'] + const statistics = [ + 'age_real', 'age_apparent', 'gender', 'beauty', 'emotion' + ].map((tag, i) => { + if (tag in data.data.statistics) { + return ( + <tr key={tag}> + <td> + {statisticsLabels[i]} + </td> + <td> + {data.data.statistics[tag]} + </td> + </tr> + ) + } + return null + }).filter(a => a) + + return ( + <div> + <div className='results'> + {results} + </div> + {!!statistics.length && ( + <table> + {statistics} + </table> + )} + <div className="about"> + <small>Step {step} / {total} {message}</small><br /> + <small>Query {step === total ? 'took' : 'timer:'} {(timing / 1000).toFixed(2)} s.</small> + </div> + </div> + ) + } +} + +const mapStateToProps = state => ({ + query: state.faceAnalysis.query, + result: state.faceAnalysis.result, + timing: state.faceAnalysis.timing, + options: state.faceAnalysis.options, +}) + +const mapDispatchToProps = dispatch => ({ + actions: bindActionCreators({ ...actions }, dispatch), +}) + +export default connect(mapStateToProps, mapDispatchToProps)(FaceAnalysisResult) diff --git a/client/faceAnalysis/index.js b/client/faceAnalysis/index.js new file mode 100644 index 00000000..efa39ded --- /dev/null +++ b/client/faceAnalysis/index.js @@ -0,0 +1,5 @@ +import Container from './faceAnalysis.container' + +export { + Container, +} diff --git a/client/faceSearch/faceSearch.query.js b/client/faceSearch/faceSearch.query.js index 9f778ca0..2d140813 100644 --- a/client/faceSearch/faceSearch.query.js +++ b/client/faceSearch/faceSearch.query.js @@ -2,7 +2,7 @@ import React, { Component } from 'react' import { bindActionCreators } from 'redux' import { connect } from 'react-redux' -import { Loader } from '../common' +import { Loader, UploadImage } from '../common' import * as actions from './faceSearch.actions' // function parse_bbox(s) { @@ -18,23 +18,8 @@ class FaceSearchQuery extends Component { image: null } - upload(e) { - const { payload } = this.props - const files = e.dataTransfer ? 
e.dataTransfer.files : e.target.files - let i - let file - for (i = 0; i < files.length; i++) { - file = files[i] - if (file && file.type.match('image.*')) break - } - if (!file) return - const fr = new FileReader() - fr.onload = () => { - fr.onload = null - this.setState({ image: fr.result }) - } - fr.readAsDataURL(files[0]) - this.props.actions.upload(this.props.payload, file) + upload(blob) { + this.props.actions.upload(this.props.payload, blob) } render() { @@ -50,22 +35,15 @@ class FaceSearchQuery extends Component { return ( <div className='query row'> <div className='uploadContainer'> - {result.loading ? - <div className='loading' style={style}> - <Loader /> - </div> - : <div style={style}> + <div style={style}> {image ? null : <img src="/assets/img/icon_camera.svg" />} - - <input - type="file" - name="img" - accept="image/*" - onChange={this.upload.bind(this)} - required - /> + <UploadImage onUpload={this.upload.bind(this)} /> </div> - } + {result.loading && ( + <div className='loading' style={style}> + <Loader /> + </div> + )} </div> <div className='cta'> <h2>Search by Image</h2> diff --git a/client/faceSearch/faceSearch.result.js b/client/faceSearch/faceSearch.result.js index 95534830..c2509033 100644 --- a/client/faceSearch/faceSearch.result.js +++ b/client/faceSearch/faceSearch.result.js @@ -72,8 +72,8 @@ class FaceSearchResult extends Component { } const els = results.map((result, i) => { const distance = distances[i] - const { uuid } = result.uuid - const { x, y, w, h } = result.roi + const { uuid } = result.file_record + const { x, y, w, h } = result.face_roi const { fullname, gender, description, images } = result.identity const bbox = { left: (100 * x) + '%', diff --git a/client/index.js b/client/index.js index 40be2841..96f2c8c8 100644 --- a/client/index.js +++ b/client/index.js @@ -20,6 +20,8 @@ function appendReactApplet(el, payload) { } function fetchDataset(payload) { + if (payload.command === 'face_analysis') return new Promise(resolve => resolve()) + if (payload.dataset === 'info') return new Promise(resolve => resolve()) const url = "https://megapixels.nyc3.digitaloceanspaces.com/v1/citations/" + payload.dataset + ".json" return fetch(url, { mode: 'cors' }).then(r => r.json()) } diff --git a/client/nameSearch/nameSearch.query.js b/client/nameSearch/nameSearch.query.js index 629b7b1d..99c1da84 100644 --- a/client/nameSearch/nameSearch.query.js +++ b/client/nameSearch/nameSearch.query.js @@ -11,8 +11,8 @@ class NameSearchQuery extends Component { handleInput(value) { this.setState({ q: value }) - if (value.strip().length > 1) { - this.props.actions.search(this.props.payload, value.strip()) + if (value.trim().length > 1) { + this.props.actions.search(this.props.payload, value.trim()) } } diff --git a/client/nameSearch/nameSearch.result.js b/client/nameSearch/nameSearch.result.js index 9e20228c..38c544cc 100644 --- a/client/nameSearch/nameSearch.result.js +++ b/client/nameSearch/nameSearch.result.js @@ -50,7 +50,7 @@ class NameSearchResult extends Component { ) } const els = results.map((result, i) => { - const { uuid } = result.uuid + const { uuid } = result.file_record const { fullname, gender, description, images } = result.identity return ( <div key={i}> diff --git a/client/store.js b/client/store.js index 13612f2d..e896bc58 100644 --- a/client/store.js +++ b/client/store.js @@ -1,10 +1,12 @@ import { applyMiddleware, compose, combineReducers, createStore } from 'redux' import thunk from 'redux-thunk' +import faceAnalysisReducer from 
'./faceAnalysis/faceAnalysis.reducer' import faceSearchReducer from './faceSearch/faceSearch.reducer' import nameSearchReducer from './nameSearch/nameSearch.reducer' const rootReducer = combineReducers({ + faceAnalysis: faceAnalysisReducer, faceSearch: faceSearchReducer, nameSearch: nameSearchReducer, }) diff --git a/client/types.js b/client/types.js index fb1fbe30..fd9aa3e0 100644 --- a/client/types.js +++ b/client/types.js @@ -6,6 +6,10 @@ export const tagAsType = (type, names) => ( }, {}) ) +export const faceAnalysis = tagAsType('faceAnalysis', [ + 'loading', 'loaded', 'poll', 'error', 'update_options', +]) + export const faceSearch = tagAsType('faceSearch', [ 'loading', 'loaded', 'error', 'update_options', ]) diff --git a/client/util.js b/client/util.js index f181ad0f..d0db0d98 100644 --- a/client/util.js +++ b/client/util.js @@ -82,6 +82,21 @@ export const preloadImage = opt => { /* AJAX */ +export const get = (uri, data) => { + let headers = { + Accept: 'application/json, application/xml, text/play, text/html, *.*', + } + let opt = { + method: 'GET', + body: data, + headers, + // credentials: 'include', + } + // console.log(headers) + // headers['X-CSRFToken'] = csrftoken + return fetch(uri, opt).then(res => res.json()) +} + export const post = (uri, data) => { let headers if (data instanceof FormData) { @@ -99,7 +114,7 @@ export const post = (uri, data) => { method: 'POST', body: data, headers, - credentials: 'include', + // credentials: 'include', } // console.log(headers) // headers['X-CSRFToken'] = csrftoken diff --git a/megapixels/app/models/sql_factory.py b/megapixels/app/models/sql_factory.py index a580f28e..5b3cb5a3 100644 --- a/megapixels/app/models/sql_factory.py +++ b/megapixels/app/models/sql_factory.py @@ -3,7 +3,7 @@ import glob import time import pandas as pd -from sqlalchemy import create_engine, Table, Column, String, Integer, DateTime, Float +from sqlalchemy import create_engine, Table, Column, String, Integer, DateTime, Float, func from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base @@ -62,7 +62,8 @@ def load_sql_dataset(path, replace=False, engine=None, base_model=None): df = pd.read_csv(fn) # fix columns that are named "index", a sql reserved word df.reindex_axis(sorted(df.columns), axis=1) - df.columns = sorted(table.__table__.columns).keys() + columns = [column.name for column in table.__table__.columns] + df.columns = columns df.to_sql(name=table.__tablename__, con=engine, if_exists='replace', index=False) return dataset @@ -82,87 +83,153 @@ class SqlDataset: self.base_model = base_model def describe(self): + """ + List the available SQL tables for a given dataset. + """ return { 'name': self.name, 'tables': list(self.tables.keys()), } def get_identity(self, id): - table = self.get_table('identity_meta') + """ + Get an identity given an ID. 
+ """ # id += 1 - identity = table.query.filter(table.image_id <= id).order_by(table.image_id.desc()).first().toJSON() + file_record_table = self.get_table('file_record') + file_record = file_record_table.query.filter(file_record_table.id == id).first() + + if not file_record: + return None + + identity_table = self.get_table('identity') + identity = identity_table.query.filter(identity_table.id == file_record.identity_id).first() + + if not identity: + return None + return { - 'uuid': self.select('uuids', id), - 'identity': identity, - 'roi': self.select('roi', id), - 'pose': self.select('pose', id), + 'file_record': file_record.toJSON(), + 'identity': identity.toJSON(), + 'face_roi': self.select('face_roi', id), + 'face_pose': self.select('face_pose', id), } def search_name(self, q): - table = self.get_table('identity_meta') - uuid_table = self.get_table('uuids') + """ + Find an identity by name. + """ + table = self.get_table('identity') + identity_list = table.query.filter(table.fullname.ilike(q)).order_by(table.fullname.desc()).limit(15) + return identity_list - identity = table.query.filter(table.fullname.like(q)).order_by(table.fullname.desc()).limit(30) + def search_description(self, q): + """ + Find an identity by description. + """ + table = self.get_table('identity') + identity_list = table.query.filter(table.description.ilike(q)).order_by(table.description.desc()).limit(15) + return identity_list + + def get_file_records_for_identities(self, identity_list): + """ + Given a list of identities, map these to file records. + """ identities = [] - for row in identity: - uuid = uuid_table.query.filter(uuid_table.id == row.image_id).first() - identities.append({ - 'uuid': uuid.toJSON(), - 'identity': row.toJSON(), - }) + file_record_table = self.get_table('file_record') + for row in identity_list: + file_record = file_record_table.query.filter(file_record_table.identity_id == row.id).first() + if file_record: + identities.append({ + 'file_record': file_record.toJSON(), + 'identity': row.toJSON(), + }) return identities def select(self, table, id): + """ + Perform a generic select. + """ table = self.get_table(table) if not table: return None session = Session() # for obj in session.query(table).filter_by(id=id): - # print(table) + # print(table) obj = session.query(table).filter(table.id == id).first() session.close() return obj.toJSON() def get_table(self, type): + """ + Get one of these memoized, dynamically generated tables. 
+ """ if type in self.tables: return self.tables[type] - elif type == 'uuids': - self.tables[type] = self.uuid_table() - elif type == 'roi': - self.tables[type] = self.roi_table() - elif type == 'identity_meta': + elif type == 'file_record': + self.tables[type] = self.file_record_table() + elif type == 'identity': self.tables[type] = self.identity_table() - elif type == 'pose': - self.tables[type] = self.pose_table() + elif type == 'face_roi': + self.tables[type] = self.face_roi_table() + elif type == 'face_pose': + self.tables[type] = self.face_pose_table() else: return None return self.tables[type] - # ==> uuids.csv <== - # index,uuid - # 0,f03fd921-2d56-4e83-8115-f658d6a72287 - def uuid_table(self): - class UUID(self.base_model): - __tablename__ = self.name + "_uuid" + # ==> file_record.csv <== + # index,ext,fn,identity_key,sha256,subdir,uuid,identity_index + def file_record_table(self): + class FileRecord(self.base_model): + __tablename__ = self.name + "_file_record" id = Column(Integer, primary_key=True) + ext = Column(String(3, convert_unicode=True), nullable=False) + fn = Column(String(36, convert_unicode=True), nullable=False) + identity_key = Column(String(36, convert_unicode=True), nullable=False) + sha256 = Column(String(36, convert_unicode=True), nullable=False) + subdir = Column(String(36, convert_unicode=True), nullable=False) uuid = Column(String(36, convert_unicode=True), nullable=False) + identity_id = Column(Integer) def toJSON(self): return { 'id': self.id, 'uuid': self.uuid, + 'identity_id': self.identity_id, } - return UUID + return FileRecord - # ==> roi.csv <== + # ==> identity.csv <== + # index,description,gender,images,fullname + # 0,A. J. Cook,Canadian actress,f,1,0 + def identity_table(self): + class Identity(self.base_model): + __tablename__ = self.name + "_identity" + id = Column(Integer, primary_key=True) + description = Column(String(36, convert_unicode=True), nullable=False) + gender = Column(String(1, convert_unicode=True), nullable=False) + images = Column(Integer, nullable=False) + fullname = Column(String(36, convert_unicode=True), nullable=False) + def toJSON(self): + return { + 'id': self.id, + 'fullname': self.fullname, + 'images': self.images, + 'gender': self.gender, + 'description': self.description, + } + return Identity + + # ==> face_roi.csv <== # index,h,image_height,image_index,image_width,w,x,y # 0,0.33000000000000007,250,0,250,0.32999999999999996,0.33666666666666667,0.35 - def roi_table(self): - class ROI(self.base_model): + def face_roi_table(self): + class FaceROI(self.base_model): __tablename__ = self.name + "_roi" id = Column(Integer, primary_key=True) h = Column(Float, nullable=False) image_height = Column(Integer, nullable=False) - image_index = Column(Integer, nullable=False) + record_id = Column(Integer, nullable=False) image_width = Column(Integer, nullable=False) w = Column(Float, nullable=False) x = Column(Float, nullable=False) @@ -170,7 +237,7 @@ class SqlDataset: def toJSON(self): return { 'id': self.id, - 'image_index': self.image_index, + 'record_id': self.record_id, 'image_height': self.image_height, 'image_width': self.image_width, 'w': self.w, @@ -178,48 +245,25 @@ class SqlDataset: 'x': self.x, 'y': self.y, } - return ROI - - # ==> identity.csv <== - # index,fullname,description,gender,images,image_index - # 0,A. J. 
Cook,Canadian actress,f,1,0 - def identity_table(self): - class Identity(self.base_model): - __tablename__ = self.name + "_identity" - id = Column(Integer, primary_key=True) - fullname = Column(String(36, convert_unicode=True), nullable=False) - description = Column(String(36, convert_unicode=True), nullable=False) - gender = Column(String(1, convert_unicode=True), nullable=False) - images = Column(Integer, nullable=False) - image_id = Column(Integer, nullable=False) - def toJSON(self): - return { - 'id': self.id, - 'image_id': self.image_id, - 'fullname': self.fullname, - 'images': self.images, - 'gender': self.gender, - 'description': self.description, - } - return Identity + return FaceROI - # ==> pose.csv <== - # index,image_index,pitch,roll,yaw + # ==> face_pose.csv <== + # index,record_index,pitch,roll,yaw # 0,0,11.16264458441435,10.415885631337728,22.99719032415318 - def pose_table(self): - class Pose(self.base_model): + def face_pose_table(self): + class FacePose(self.base_model): __tablename__ = self.name + "_pose" id = Column(Integer, primary_key=True) - image_id = Column(Integer, primary_key=True) + record_id = Column(Integer, nullable=False) pitch = Column(Float, nullable=False) roll = Column(Float, nullable=False) yaw = Column(Float, nullable=False) def toJSON(self): return { 'id': self.id, - 'image_id': self.image_id, + 'record_id': self.record_id, 'pitch': self.pitch, 'roll': self.roll, 'yaw': self.yaw, } - return Pose + return FacePose diff --git a/megapixels/app/processors/faiss.py b/megapixels/app/processors/faiss.py index 5156ad71..0de8ec69 100644 --- a/megapixels/app/processors/faiss.py +++ b/megapixels/app/processors/faiss.py @@ -27,9 +27,12 @@ def build_all_faiss_databases(): build_faiss_database(name, DefaultRecipe()) def build_faiss_database(name, recipe): - vec_fn = os.path.join(cfg.DIR_FAISS_METADATA, name, "vecs.csv") + vec_fn = os.path.join(cfg.DIR_FAISS_METADATA, name, "face_vector.csv") index_fn = os.path.join(cfg.DIR_FAISS_INDEXES, name + ".index") + if not os.path.exists(vec_fn): + return + index = faiss.index_factory(recipe.dim, recipe.factory_type) keys, rows = load_csv_safe(vec_fn) diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py index 3683d5fd..b3bce9bc 100644 --- a/megapixels/app/server/api.py +++ b/megapixels/app/server/api.py @@ -3,6 +3,7 @@ import re import time import dlib import numpy as np +import operator from flask import Blueprint, request, jsonify from PIL import Image # todo: try to remove PIL dependency @@ -27,7 +28,6 @@ def index(): """List the datasets and their fields""" return jsonify({ 'datasets': list_datasets() }) - @api.route('/dataset/<dataset_name>') def show(dataset_name): """Show the data that a dataset will return""" @@ -37,7 +37,6 @@ def show(dataset_name): else: return jsonify({ 'status': 404 }) - @api.route('/dataset/<dataset_name>/face', methods=['POST']) def upload(dataset_name): """Query an image against FAISS and return the matching identities""" @@ -108,9 +107,10 @@ def upload(dataset_name): for _d, _i in zip(distances, indexes): if _d <= THRESHOLD: dists.append(round(float(_d), 2)) - ids.append(_i+1) + ids.append(_i) - results = [ dataset.get_identity(int(_i)) for _i in ids ] + identities = [ dataset.get_identity(int(_i)) for _i in ids ] + identities = list(filter(None, identities)) # print(distances) # print(ids) @@ -127,7 +127,7 @@ def upload(dataset_name): # print(results) return jsonify({ 'query': query, - 'results': results, + 'results': identities, 'distances': dists, }) @@ -139,15 +139,56 
@@ def name_lookup(dataset_name): dataset = get_dataset(dataset_name) q = request.args.get('q') - # print(q) + q = re.sub('[^a-zA-Z. ]+', '*', q) + terms = q.split(' ') query = { 'q': q, 'timing': time.time() - start, } - results = dataset.search_name(q + '%') if q else None + + if len(terms) == 0: + return jsonify({ 'query': query, 'results': [] }) + + lookup = {} + results_lookup = {} + + names = dataset.search_name(q + '%') + for name in names: + if name.id in lookup: + print(name.fullname) + lookup[name.id] += 4 + else: + print(name.fullname) + lookup[name.id] = 4 + results_lookup[name.id] = name - # print(results) + for i, term in enumerate(terms[0:5]): + search_term = '%' + term + '%' + names = dataset.search_name(search_term) if len(term) > 0 else [] + descriptions = dataset.search_description(search_term) if len(term) > 0 else [] + for name in names: + if name.id in lookup: + print(name.fullname) + lookup[name.id] += 2 + else: + print(name.fullname) + lookup[name.id] = 2 + results_lookup[name.id] = name + for name in descriptions: + if name.id in lookup: + print(name.fullname) + lookup[name.id] += 1 + else: + print(name.fullname) + lookup[name.id] = 1 + results_lookup[name.id] = name + + sorted_names = sorted(lookup.items(), key=operator.itemgetter(1), reverse=True)[0:10] + top_names = [results_lookup[item[0]] for item in sorted_names] + results = dataset.get_file_records_for_identities(top_names) + + print(results) return jsonify({ 'query': query, 'results': results, diff --git a/megapixels/app/server/api_task.py b/megapixels/app/server/api_task.py new file mode 100644 index 00000000..57ae9f7d --- /dev/null +++ b/megapixels/app/server/api_task.py @@ -0,0 +1,124 @@ +import os +import re +import uuid +import time +import dlib +import tempfile +import simplejson as json +import numpy as np +from flask import Blueprint, request, jsonify +from PIL import Image, ImageOps # todo: try to remove PIL dependency + +from celery.result import AsyncResult +from app.server.tasks import celery +from app.server.tasks import task_lookup, list_active_tasks +# from app.models.sql_factory import load_sql_datasets, list_datasets, get_dataset, get_table + +api_task = Blueprint('task', __name__) + +@api_task.route('/') +def index(): + """List active tasks""" + return jsonify(list_active_tasks) + +@api_task.route('/status/<task_name>/<task_id>') +def task_status(task_name, task_id): + """Return celery image processing status""" + if task_name in task_lookup: + task = task_lookup[task_name]['task'].AsyncResult(task_id) + # task = AsyncResult(task_id, app=celery) + + if task_name not in task_lookup or task.info is None: + return jsonify({ + 'state': 'error', + 'percent': 100, + 'message': 'Unknown task', + }) + # app.logger.info('task state: {}'.format(task.state)) + if task.state == 'PENDING': + response = { + 'state': task.state, + 'percent': 0, + 'message': 'Pending...', + 'data': task.info, + } + elif task.state != 'FAILURE': + response = { + 'state': task.state, + 'percent': task.info.get('percent', 0), + 'uuid': task.info.get('uuid', 0), + 'message': task.info.get('message', ''), + 'data': task.info, + } + if 'result' in task.info: + response['result'] = task.info['result'] + else: + # something went wrong in the background job + response = { + 'state': task.state, + 'percent': 100, + 'message': str(task.info), # this is the exception raised + 'data': task.info, + } + return jsonify(response) + +@api_task.route('/upload/sleep', methods=['GET', 'POST']) +def sleep_test(): + """ + Test the Celery system 
using a task that sleeps. + """ + async_task = task_lookup['sleep']['task'].apply_async(args=['sleep_test']) + task_url = '/task/status/{}/{}'.format('sleep', async_task.id) + return jsonify({ + 'result': True, + 'task_url': task_url, + }) + +@api_task.route('/upload/blur', methods=['POST']) +def upload(): + return process('blur') + +@api_task.route('/upload/demo', methods=['POST']) +def demo(): + return process('demo') + +def process(style): + """ + Process an image in a particular style + """ + print('style: {}'.format(style)) + if style in task_lookup: + task = task_lookup[style]['task'] + print('task', task) + else: + return jsonify({ + 'result': False, + 'error': 'Unknown task', + }) + + print('get file...') + file = request.files['query_img'] + + uuid_str = str(uuid.uuid4()) + + print('[+] style: {}'.format(style)) + print('[+] uuid_name: {}'.format(uuid_str)) + + im = Image.open(file.stream).convert('RGB') + im = ImageOps.fit(im, (256, 256,)) + + tmpfile = tempfile.NamedTemporaryFile(delete=False) + + # Save image to disk + print('[+] Save image to temporary file') + im.save(tmpfile, 'JPEG', quality=80) + + print('[+] Start celery') + async_task = task.apply_async(args=[uuid_str, tmpfile.name]) + task_url = '/task/status/{}/{}'.format(style, async_task.id) + + return jsonify({ + 'result': True, + 'taskURL': task_url, + 'uuid': uuid_str + }) diff --git a/megapixels/app/server/create.py b/megapixels/app/server/create.py index 4b1333b9..a1ce56df 100644 --- a/megapixels/app/server/create.py +++ b/megapixels/app/server/create.py @@ -1,8 +1,25 @@ +import logging +import logging.handlers + +logger = logging.getLogger("") +logger.setLevel(logging.DEBUG) +handler = logging.handlers.RotatingFileHandler("flask.log", + maxBytes=3000000, backupCount=2) +formatter = logging.Formatter( + '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s') +handler.setFormatter(formatter) +logger.addHandler(handler) +logging.getLogger().addHandler(logging.StreamHandler()) + +logging.debug("starting app") + from flask import Flask, Blueprint, jsonify, send_from_directory from flask_sqlalchemy import SQLAlchemy from app.models.sql_factory import connection_url, load_sql_datasets +from app.settings import app_cfg as cfg from app.server.api import api +from app.server.api_task import api_task db = SQLAlchemy() @@ -13,11 +30,14 @@ def create_app(script_info=None): app = Flask(__name__, static_folder='static', static_url_path='') app.config['SQLALCHEMY_DATABASE_URI'] = connection_url app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False + app.config['CELERY_BROKER_URL'] = cfg.CELERY_BROKER_URL + app.config['CELERY_RESULT_BACKEND'] = cfg.CELERY_RESULT_BACKEND db.init_app(app) datasets = load_sql_datasets(replace=False, base_model=db.Model) app.register_blueprint(api, url_prefix='/api') + app.register_blueprint(api_task, url_prefix='/task') app.add_url_rule('/<path:file_relative_path_to_root>', 'serve_page', serve_page, methods=['GET']) @app.route('/', methods=['GET']) diff --git a/megapixels/app/server/tasks/__init__.py b/megapixels/app/server/tasks/__init__.py new file mode 100644 index 00000000..c0db0be5 --- /dev/null +++ b/megapixels/app/server/tasks/__init__.py @@ -0,0 +1,47 @@ +import simplejson as json +from app.settings import app_cfg as cfg +from celery import Celery + +celery = Celery(__name__, backend=cfg.CELERY_RESULT_BACKEND, broker=cfg.CELERY_BROKER_URL) + +from app.server.tasks.sleep import sleep_task +from app.server.tasks.blur import blur_task +from app.server.tasks.demo import demo_task 
+ +def list_active_tasks(): + dropdown = {} + for k,v in task_lookup.items(): + if 'active' not in v or v['active'] is not False: + is_default = 'default' in v and v['default'] is True + task = { + 'name': k, + 'title': v['title'], + 'selected': is_default, + } + dropdown[k] = task + return dropdown + +################################################################### +# Add all valid tasks to this lookup. +# Set 'active': False to disable a task +# Set 'default': True to define the default task + +task_lookup = { + 'sleep': { + 'title': 'Sleep Test', + 'task': sleep_task, + 'active': True, + 'default': True, + }, + 'blur': { + 'title': 'Blur', + 'task': blur_task, + 'active': True, + }, + 'demo': { + 'title': 'Facial processing pipeline', + 'task': demo_task, + 'active': True, + 'default': True, + } +} diff --git a/megapixels/app/server/tasks/blur.py b/megapixels/app/server/tasks/blur.py new file mode 100644 index 00000000..74798cee --- /dev/null +++ b/megapixels/app/server/tasks/blur.py @@ -0,0 +1,81 @@ +import os +import sys +import time +import datetime +import json +from PIL import Image +import cv2 as cv +import numpy as np +from app.utils.im_utils import ensure_np, ensure_pil +from flask import current_app as app + +import app.settings.app_cfg as cfg + +from app.server.tasks import celery + +from celery.utils.log import get_task_logger +log = get_task_logger(__name__) +import imutils + +@celery.task(bind=True) +def blur_task(self, uuid_name, fn): + """Process image and update during""" + log.debug('process_image_task, uuid: {}'.format(uuid_name)) + log.debug('fn: {}'.format(fn)) + + files = [] + + meta = { + 'step': 0, + 'total': 3, + 'message': 'Starting', + 'uuid': uuid_name, + 'data': {}, + } + self.update_state(state='PROCESSING', meta=meta) + + im = Image.open(fn).convert('RGB') + os.remove(fn) + + meta['step'] += 1 + meta['message'] = 'Applying blur' + self.update_state(state='PROCESSING', meta=meta) + + im_np = ensure_np(im) + im_blur = cv.blur(im_np, (5,5), 1.0) + im_blur_pil = ensure_pil(im_blur) + + fn = uuid_name + '_blur.jpg' + fpath = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn) + im_blur_pil.save(fpath, 'JPEG', quality=80) + log.debug('fpath: {}'.format(fpath)) + print('fpath: {}'.format(fpath)) + + # files.append({ + # 'title': 'Blurred image', + # 'fn': render_uri + uuid_name + '_blur.jpg' + # }) + + meta['step'] += 1 + meta['message'] = 'Applying blur' + meta['data']['blur_fn'] = { + 'title': 'Blurred image', + 'url': os.path.join('/user_content/', fn) + } + self.update_state(state='PROCESSING', meta=meta) + time.sleep(3) + + if os.path.exists(fpath): + os.remove(fpath) + + meta['step'] += 1 + meta['message'] = 'Securely deleting user content' + self.update_state(state='PROCESSING', meta=meta) + time.sleep(2) + + log.debug('done!!') + + meta['step'] = meta['total'] + meta['state'] = 'complete' + return meta + diff --git a/megapixels/app/server/tasks/demo.py b/megapixels/app/server/tasks/demo.py new file mode 100644 index 00000000..c27b08b5 --- /dev/null +++ b/megapixels/app/server/tasks/demo.py @@ -0,0 +1,262 @@ + +import app.settings.app_cfg as cfg +from app.server.tasks import celery + +from celery.utils.log import get_task_logger +log = get_task_logger(__name__) + +opt_size = (256, 256,) + +@celery.task(bind=True) +def demo_task(self, uuid_name, fn): + + import sys + import os + from os.path import join + from pathlib import Path + import time + + import numpy as np + import cv2 as cv + import dlib + from PIL import Image + import matplotlib.pyplot as plt + + 
from app.utils import logger_utils, file_utils, im_utils, display_utils, draw_utils + from app.utils import plot_utils + from app.processors import face_detector, face_landmarks, face_age_gender, face_beauty + # , face_emotion + from app.models.data_store import DataStore + + # TODO add selective testing + opt_gpu = -1 + opt_run_pose = True + opt_run_2d_68 = True + opt_run_3d_68 = True + opt_run_3d_68 = True + + opt_gif_size = (256, 256,) + opt_gif_frames = 15 + + meta = { + 'step': 0, + 'total': 10, + 'message': 'Starting', + 'uuid': uuid_name, + 'data': { 'statistics': {} }, + } + paths = [] + + def step(msg, step=1): + meta['message'] = msg + meta['step'] += step + log.debug('> {}'.format(msg)) + self.update_state(state='PROCESSING', meta=meta) + + def save_image(key, title, data): + fn = '{}_{}.jpg'.format(uuid_name, key) + fpath = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn) + paths.append(fpath) + cv.imwrite(fpath, data) + + meta['data'][key] = { + 'title': title, + 'url': os.path.join('/user_content/', fn), + } + + step('Loading image') + self.update_state(state='PROCESSING', meta=meta) + + # os.path.join('/user_content/', fn) + + # ------------------------------------------------- + # init here + + # load image + im = cv.imread(fn) + im_resized = im_utils.resize(im, width=opt_size[0], height=opt_size[1]) + + # ---------------------------------------------------------------------------- + # detect face + + face_detector_instance = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU + step('Detecting face') + st = time.time() + bboxes = face_detector_instance.detect(im_resized, largest=True) + bbox = bboxes[0] + dim = im_resized.shape[:2][::-1] + bbox_dim = bbox.to_dim(dim) + if not bbox: + log.error('No face detected') + meta['error'] = 'No face detected' + self.update_state(state='FAILURE', meta=meta) + return meta + else: + log.info(f'Detected face in {(time.time() - st):.2f}s') + + + # ---------------------------------------------------------------------------- + # detect 3D landmarks + + step('Generating 3D Landmarks') + log.info('loading 3D landmark generator files...') + landmark_detector_3d_68 = face_landmarks.FaceAlignment3D_68(gpu=opt_gpu) # -1 for CPU + log.info('generating 3D landmarks...') + st = time.time() + points_3d_68 = landmark_detector_3d_68.landmarks(im_resized, bbox_dim.to_xyxy()) + log.info(f'generated 3D landmarks in {(time.time() - st):.2f}s') + log.info('') + + # draw 3d landmarks + im_landmarks_3d_68 = im_resized.copy() + draw_utils.draw_landmarks3D(im_landmarks_3d_68, points_3d_68) + draw_utils.draw_bbox(im_landmarks_3d_68, bbox_dim) + + save_image('landmarks_3d_68', '3D Landmarks', im_landmarks_3d_68) + + # ---------------------------------------------------------------------------- + # generate 3D GIF animation + + step('Generating GIF Animation') + log.info('generating 3D animation...') + + fn = '{}_{}.gif'.format(uuid_name, '3d') + fp_out = os.path.join(cfg.DIR_SITE_USER_CONTENT, fn) + paths.append(fp_out) + + st = time.time() + plot_utils.generate_3d_landmark_anim(np.array(points_3d_68), fp_out, + size=opt_gif_size, num_frames=opt_gif_frames) + log.info(f'Generated animation in {(time.time() - st):.2f}s') + log.info(f'Saved to: {fp_out}') + log.info('') + + meta['data']['points_3d_68'] = points_3d_68 + meta['data']['points_3d_68'] = { + 'title': '3D Animated GIF', + 'url': os.path.join('/user_content/', fn), + } + + # ---------------------------------------------------------------------------- + # generate 68 point landmarks using dlib + + 
step('Generating 2D 68PT landmarks') + log.info('initializing face landmarks 68 dlib...') + from app.processors import face_landmarks + landmark_detector_2d_68 = face_landmarks.Dlib2D_68() + log.info('generating 2D 68PT landmarks...') + st = time.time() + points_2d_68 = landmark_detector_2d_68.landmarks(im_resized, bbox_dim) + log.info(f'generated 2D 68PT face landmarks in {(time.time() - st):.2f}s') + log.info('') + + # draw 2d landmarks + im_landmarks_2d_68 = im_resized.copy() + draw_utils.draw_landmarks2D(im_landmarks_2d_68, points_2d_68) + draw_utils.draw_bbox(im_landmarks_2d_68, bbox_dim) + save_image('landmarks_2d_68', '2D Landmarks', im_landmarks_2d_68) + + # ---------------------------------------------------------------------------- + # generate pose from 68 point 2D landmarks + + if opt_run_pose: + step('Generating pose') + log.info('initialize pose...') + from app.processors import face_pose + pose_detector = face_pose.FacePoseDLIB() + log.info('generating pose...') + st = time.time() + pose_data = pose_detector.pose(points_2d_68, dim) + log.info(f'generated pose {(time.time() - st):.2f}s') + log.info('') + + im_pose = im_resized.copy() + draw_utils.draw_pose(im_pose, pose_data['point_nose'], pose_data['points']) + draw_utils.draw_degrees(im_pose, pose_data) + save_image('pose', 'Pose', im_pose) + + # ---------------------------------------------------------------------------- + # age + + # real + step('Running age predictor') + age_real_predictor = face_age_gender.FaceAgeReal() + st = time.time() + age_real = age_real_predictor.predict(im_resized, bbox_dim) + log.info(f'age real took: {(time.time()-st)/1000:.5f}s') + meta['data']['statistics']['age_real'] = f'{(age_real):.2f}' + + # apparent + age_apparent_predictor = face_age_gender.FaceAgeApparent() + st = time.time() + age_apparent = age_apparent_predictor.predict(im_resized, bbox_dim) + log.info(f'age apparent took: {(time.time()-st)/1000:.5f}s') + meta['data']['statistics']['age_apparent'] = f'{(age_apparent):.2f}' + + # gender + step('Running gender predictor') + gender_predictor = face_age_gender.FaceGender() + st = time.time() + gender = gender_predictor.predict(im_resized, bbox_dim) + log.info(f'gender took: {(time.time()-st)/1000:.5f}s') + meta['data']['statistics']['gender'] = f"M: {gender['m']:.2f}, F: {gender['f']:.2f}" + + # # ---------------------------------------------------------------------------- + # # emotion + + # emotion_predictor = face_emotion.FaceEmotion(gpu=opt_gpu) + # emotion_score = emotion_predictor.emotion(im_resized, bbox_dim) + # log.info(f'emotion score: {(100*emotion_score):.2f}') + + # im_emotion = im_resized.copy() + # draw_utils.draw_bbox(im_emotion, bbox_dim) + # txt = f'emotion score: {(100*emotion_score):.2f}' + # draw_utils.draw_text(im_emotion, bbox_dim.pt_tl, txt) + # save_image('emotion', 'Emotion', im_emotion) + + + # ---------------------------------------------------------------------------- + # beauty + + # TODO fix Keras CPU/GPU device selection issue + # NB: GPU visibility issues with dlib/keras + # Wrap this with cuda toggle and run before init dlib GPU + + step('Running beauty predictor') + device_cur = os.getenv('CUDA_VISIBLE_DEVICES', '') + os.environ['CUDA_VISIBLE_DEVICES'] = '' + beauty_predictor = face_beauty.FaceBeauty() + os.environ['CUDA_VISIBLE_DEVICES'] = device_cur + + beauty_score = beauty_predictor.beauty(im_resized, bbox_dim) + log.info(f'beauty score: {(100*beauty_score):.2f}') + + # # draw 2d landmarks + # im_beauty = im_resized.copy() + # 
draw_utils.draw_bbox(im_beauty, bbox_dim) + # txt = f'Beauty score: {(100*beauty_score):.2f}' + # draw_utils.draw_text(im_beauty, bbox_dim.pt_tl, txt) + # save_image('beauty', 'Beauty', im_beauty) + meta['data']['statistics']['beauty'] = f'{(100*beauty_score):.2f}' + + step('Done') + + # # 3DDFA + # self.log.debug('Add depth') + # self.log.debug('Add pncc') + + # # TODO + # self.log.debug('Add 3D face model') + # self.log.debug('Add face texture flat') + # self.log.debug('Add ethnicity') + + log.debug('done!!') + + time.sleep(3) + for path in paths: + if os.path.exists(path): + os.remove(path) + + meta['step'] = meta['total'] + meta['state'] = 'SUCCESS' + return meta diff --git a/megapixels/app/server/tasks/sleep.py b/megapixels/app/server/tasks/sleep.py new file mode 100644 index 00000000..fa40b0e9 --- /dev/null +++ b/megapixels/app/server/tasks/sleep.py @@ -0,0 +1,38 @@ +import time + +# from .. import basemodels +# celery = basemodels.celery + +from celery.utils.log import get_task_logger +celery_logger = get_task_logger(__name__) + +from app.server.tasks import celery + +import imutils + +@celery.task(bind=True) +def sleep_task(self, uuid_name): + celery_logger.debug('sleep_task'.format(uuid_name)) + msgs = [ + {'msg':'Uploaded OK','time':.1}, + {'msg':'Segmenting Image...','time':2}, + {'msg':'Found: Person, Horse','time':1}, + {'msg':'Creating Pix2Pix','time':2} + ] + for i,m in enumerate(msgs): + percent = int(float(i)/float(len(msgs))*100.0) + self.update_state( + state = 'processing', + meta = { + 'percent': percent, + 'message': m['msg'], + 'uuid': uuid_name + }) + celery_logger.debug(m['msg']) + time.sleep(m['time']) + + return { + 'percent': 100, + 'state': 'complete', + 'uuid': uuid_name + } diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py index fde9fed7..14e2493c 100644 --- a/megapixels/app/settings/app_cfg.py +++ b/megapixels/app/settings/app_cfg.py @@ -150,3 +150,10 @@ S3_DATASETS_PATH = "v1" # datasets is already in the filename DIR_SITE_PUBLIC = "../site/public" DIR_SITE_CONTENT = "../site/content" DIR_SITE_TEMPLATES = "../site/templates" +DIR_SITE_USER_CONTENT = "../site/public/user_content" + +# ----------------------------------------------------------------------------- +# Celery +# ----------------------------------------------------------------------------- +CELERY_BROKER_URL = 'redis://localhost:6379/0' +CELERY_RESULT_BACKEND = 'redis://localhost:6379/0' diff --git a/megapixels/app/utils/im_utils.py b/megapixels/app/utils/im_utils.py index e882c67f..d36c1c32 100644 --- a/megapixels/app/utils/im_utils.py +++ b/megapixels/app/utils/im_utils.py @@ -19,7 +19,19 @@ from torch.autograd import Variable from sklearn.metrics.pairwise import cosine_similarity import datetime +def ensure_pil(im): + """Ensure image is Pillow format""" + try: + im.verify() + return im + except: + return Image.fromarray(im.astype('uint8'), 'RGB') +def ensure_np(im): + """Ensure image is numpy array""" + if type(im) == np.ndarray: + return im + return np.asarray(im, np.uint8) def num_channels(im): '''Returns number of channels in numpy.ndarray image''' diff --git a/old/server/run.py b/old/server/run.py index c4c3e8d7..ff2d5009 100644 --- a/old/server/run.py +++ b/old/server/run.py @@ -8,5 +8,5 @@ import logging logging.basicConfig(filename='error.log',level=logging.DEBUG) if __name__ == '__main__': - app.run(host='0.0.0.0',debug=False,threaded=False,port=8000) + app.run(host='0.0.0.0', debug=True, threaded=False, port=8000) pass diff --git a/package-lock.json 
diff --git a/package-lock.json b/package-lock.json
index a42dca34..60a74ece 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -2007,9 +2007,9 @@
       }
     },
     "connect-history-api-fallback": {
-      "version": "1.5.0",
-      "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-1.5.0.tgz",
-      "integrity": "sha1-sGhzk0vF40T+9hGhlqb6rgruAVo=",
+      "version": "1.6.0",
+      "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-1.6.0.tgz",
+      "integrity": "sha512-e54B99q/OUoH64zYYRf3HBP5z24G38h5D3qXu23JGRoigpX5Ss4r9ZnDk3g0Z8uQC2x2lPaJ+UlWBc1ZWBWdLg==",
       "dev": true
     },
     "connected-react-router": {
@@ -3427,6 +3427,16 @@
         "strip-eof": "^1.0.0"
       }
     },
+    "exif-reader": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/exif-reader/-/exif-reader-1.0.2.tgz",
+      "integrity": "sha1-AkCLl7YQKOpPReW4k6g2+aoorE8="
+    },
+    "exifreader": {
+      "version": "2.5.0",
+      "resolved": "https://registry.npmjs.org/exifreader/-/exifreader-2.5.0.tgz",
+      "integrity": "sha512-jxS+cSjalvtF4Ga1D0G1YbVwPBOUYIGMuVbUNfor1+5CSc4h2FBvFC59HSkpPTqiHnA9zF0W5BW8eV3CjmklcQ=="
+    },
     "exit-hook": {
       "version": "1.1.1",
       "resolved": "https://registry.npmjs.org/exit-hook/-/exit-hook-1.1.1.tgz",
@@ -3681,9 +3691,9 @@
       }
     },
     "follow-redirects": {
-      "version": "1.5.10",
-      "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.5.10.tgz",
-      "integrity": "sha512-0V5l4Cizzvqt5D44aTXbFZz+FtyXV1vrDN6qrelxtfYQKW0KO0W2T/hkE8xvGa/540LkZlkaUjO4ailYTFtHVQ==",
+      "version": "1.6.1",
+      "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.6.1.tgz",
+      "integrity": "sha512-t2JCjbzxQpWvbhts3l6SH1DKzSrx8a+SsaVf4h6bG4kOXUuPYS/kg2Lr4gQSb7eemaHqJkOThF1BGyjlUkO1GQ==",
       "dev": true,
       "requires": {
         "debug": "=3.1.0"
@@ -4425,9 +4435,9 @@
       "integrity": "sha512-6uHUhOPEBgQ24HM+r6b/QwWfZq+yiFcipKFrOFiBEnWdy5sdzYoi+pJeQaPI5qOLRFqWmAXUPQNsielzdLoecA=="
     },
     "handle-thing": {
-      "version": "1.2.5",
-      "resolved": "http://registry.npmjs.org/handle-thing/-/handle-thing-1.2.5.tgz",
-      "integrity": "sha1-/Xqtcmvxpf0W38KbL3pmAdJxOcQ=",
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.0.tgz",
+      "integrity": "sha512-d4sze1JNC454Wdo2fkuyzCr6aHcbL6PGGuFAz0Li/NcOm1tCHGnWDRmJP85dh9IhQErTc2svWFEX5xHIOo//kQ==",
       "dev": true
     },
     "has": {
@@ -8338,32 +8348,75 @@
       "dev": true
     },
     "spdy": {
-      "version": "3.4.7",
-      "resolved": "https://registry.npmjs.org/spdy/-/spdy-3.4.7.tgz",
-      "integrity": "sha1-Qv9B7OXMD5mjpsKKq7c/XDsDrLw=",
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.0.tgz",
+      "integrity": "sha512-ot0oEGT/PGUpzf/6uk4AWLqkq+irlqHXkrdbk51oWONh3bxQmBuljxPNl66zlRRcIJStWq0QkLUCPOPjgjvU0Q==",
       "dev": true,
       "requires": {
-        "debug": "^2.6.8",
-        "handle-thing": "^1.2.5",
+        "debug": "^4.1.0",
+        "handle-thing": "^2.0.0",
         "http-deceiver": "^1.2.7",
-        "safe-buffer": "^5.0.1",
         "select-hose": "^2.0.0",
-        "spdy-transport": "^2.0.18"
+        "spdy-transport": "^3.0.0"
+      },
+      "dependencies": {
+        "debug": {
+          "version": "4.1.1",
+          "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
+          "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
+          "dev": true,
+          "requires": {
+            "ms": "^2.1.1"
+          }
+        },
+        "ms": {
+          "version": "2.1.1",
+          "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz",
+          "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==",
+          "dev": true
+        }
       }
     },
     "spdy-transport": {
-      "version": "2.1.1",
-      "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-2.1.1.tgz",
"https://registry.npmjs.org/spdy-transport/-/spdy-transport-2.1.1.tgz", - "integrity": "sha512-q7D8c148escoB3Z7ySCASadkegMmUZW8Wb/Q1u0/XBgDKMO880rLQDj8Twiew/tYi7ghemKUi/whSYOwE17f5Q==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", + "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", "dev": true, "requires": { - "debug": "^2.6.8", - "detect-node": "^2.0.3", + "debug": "^4.1.0", + "detect-node": "^2.0.4", "hpack.js": "^2.1.6", - "obuf": "^1.1.1", - "readable-stream": "^2.2.9", - "safe-buffer": "^5.0.1", - "wbuf": "^1.7.2" + "obuf": "^1.1.2", + "readable-stream": "^3.0.6", + "wbuf": "^1.7.3" + }, + "dependencies": { + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "dev": true, + "requires": { + "ms": "^2.1.1" + } + }, + "ms": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", + "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==", + "dev": true + }, + "readable-stream": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.1.1.tgz", + "integrity": "sha512-DkN66hPyqDhnIQ6Jcsvx9bFjhw214O4poMBcIMgPVpQvNy9a0e0Uhg5SqySyDKAmUlwt8LonTBz1ezOnM8pUdA==", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + } + } } }, "split-string": { @@ -9902,9 +9955,9 @@ } }, "webpack-dev-server": { - "version": "3.1.10", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-3.1.10.tgz", - "integrity": "sha512-RqOAVjfqZJtQcB0LmrzJ5y4Jp78lv9CK0MZ1YJDTaTmedMZ9PU9FLMQNrMCfVu8hHzaVLVOJKBlGEHMN10z+ww==", + "version": "3.1.14", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-3.1.14.tgz", + "integrity": "sha512-mGXDgz5SlTxcF3hUpfC8hrQ11yhAttuUQWf1Wmb+6zo3x6rb7b9mIfuQvAPLdfDRCGRGvakBWHdHOa0I9p/EVQ==", "dev": true, "requires": { "ansi-html": "0.0.7", @@ -9926,12 +9979,14 @@ "portfinder": "^1.0.9", "schema-utils": "^1.0.0", "selfsigned": "^1.9.1", + "semver": "^5.6.0", "serve-index": "^1.7.2", "sockjs": "0.3.19", "sockjs-client": "1.3.0", - "spdy": "^3.4.1", + "spdy": "^4.0.0", "strip-ansi": "^3.0.0", "supports-color": "^5.1.0", + "url": "^0.11.0", "webpack-dev-middleware": "3.4.0", "webpack-log": "^2.0.0", "yargs": "12.0.2" @@ -10083,13 +10138,13 @@ } }, "execa": { - "version": "0.10.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-0.10.0.tgz", - "integrity": "sha512-7XOMnz8Ynx1gGo/3hyV9loYNPWM94jG3+3T3Y8tsfSstFmETmENCMU/A/zj8Lyaj1lkgEepKepvd6240tBRvlw==", + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", + "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", "dev": true, "requires": { "cross-spawn": "^6.0.0", - "get-stream": "^3.0.0", + "get-stream": "^4.0.0", "is-stream": "^1.1.0", "npm-run-path": "^2.0.0", "p-finally": "^1.0.0", @@ -10257,6 +10312,15 @@ } } }, + "get-stream": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", + "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", + "dev": true, + "requires": { + "pump": "^3.0.0" + } + }, "glob-parent": { 
"version": "3.1.0", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz", @@ -10402,12 +10466,12 @@ } }, "os-locale": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-3.0.1.tgz", - "integrity": "sha512-7g5e7dmXPtzcP4bgsZ8ixDVqA7oWYuEz4lOSujeWyliPai4gfVDiFIcwBg3aGCPnmSGfzOKTK3ccPn0CKv3DBw==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-3.1.0.tgz", + "integrity": "sha512-Z8l3R4wYWM40/52Z+S265okfFj8Kt2cC2MKY+xNi3kFs+XGI7WXu/I309QQQYbRW4ijiZ+yxs9pqEhJh0DqW3Q==", "dev": true, "requires": { - "execa": "^0.10.0", + "execa": "^1.0.0", "lcid": "^2.0.0", "mem": "^4.0.0" } @@ -10421,6 +10485,16 @@ "has-flag": "^3.0.0" } }, + "url": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/url/-/url-0.11.0.tgz", + "integrity": "sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE=", + "dev": true, + "requires": { + "punycode": "1.3.2", + "querystring": "0.2.0" + } + }, "yargs": { "version": "12.0.2", "resolved": "https://registry.npmjs.org/yargs/-/yargs-12.0.2.tgz", diff --git a/package.json b/package.json index feef9b94..fcabb7e1 100644 --- a/package.json +++ b/package.json @@ -30,6 +30,8 @@ "data-uri-to-buffer": "^2.0.0", "date-fns": "^1.29.0", "dotenv": "^6.0.0", + "exif-reader": "^1.0.2", + "exifreader": "^2.5.0", "fetch-jsonp": "^1.1.3", "file-saver": "^2.0.0-rc.3", "history": "^4.7.2", @@ -72,6 +74,6 @@ "uglifyjs-webpack-plugin": "^1.3.0", "webpack": "3.x.x", "webpack-cli": "^3.1.0", - "webpack-dev-server": "^3.1.10" + "webpack-dev-server": "^3.1.14" } } diff --git a/site/assets/css/applets.css b/site/assets/css/applets.css index 9c37354a..0c566a9f 100644 --- a/site/assets/css/applets.css +++ b/site/assets/css/applets.css @@ -25,6 +25,9 @@ font-size: 9pt; padding-top: 10px; } + +/* search results */ + .results { margin-top: 10px; padding-bottom: 10px; @@ -59,8 +62,8 @@ } .img .bbox { position: absolute; - color: rgba(255,0,0,1); - background: rgba(255,0,0,0.05); + color: rgba(255,255,255,1); + background: rgba(255,255,255,0.05); border: 1px solid; } .cta { @@ -119,4 +122,16 @@ } .tabulator-row.tabulator-row-even { background-color: rgba(255,255,255,0.1); -}
\ No newline at end of file
+}
+
+/* analysis results */
+
+.analysisContainer .results div {
+  width: 256px;
+  text-align: center;
+  padding: 10px;
+  margin: 10px;
+}
+.analysisContainer .results div img {
+  max-width: 100%;
+}
diff --git a/site/public/info/index.html b/site/public/info/index.html
new file mode 100644
index 00000000..0d7b2d2e
--- /dev/null
+++ b/site/public/info/index.html
@@ -0,0 +1,53 @@
+<!doctype html>
+<html>
+<head>
+  <title>MegaPixels</title>
+  <meta charset="utf-8" />
+  <meta name="author" content="Adam Harvey" />
+  <meta name="description" content="" />
+  <meta name="referrer" content="no-referrer" />
+  <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+  <link rel='stylesheet' href='/assets/css/fonts.css' />
+  <link rel='stylesheet' href='/assets/css/tabulator.css' />
+  <link rel='stylesheet' href='/assets/css/css.css' />
+  <link rel='stylesheet' href='/assets/css/leaflet.css' />
+  <link rel='stylesheet' href='/assets/css/applets.css' />
+</head>
+<body>
+  <header>
+    <a class='slogan' href="/">
+      <div class='logo'></div>
+      <div class='site_name'>MegaPixels</div>
+      <span class='sub'>The Darkside of Datasets</span>
+    </a>
+    <div class='links'>
+      <a href="/datasets/">Datasets</a>
+      <a href="/research/">Research</a>
+      <a href="/about/">About</a>
+    </div>
+  </header>
+  <div class="content">
+
+    <section><h2>What do facial recognition algorithms see?</h2>
+</section><section class='applet_container'><div class='applet' data-payload='{"command": "face_analysis"}'></div></section><section><p>Results are only stored for the duration of the analysis and are deleted when you leave this page.</p>
+</section>
+
+  </div>
+  <footer>
+    <div>
+      <a href="/">MegaPixels.cc</a>
+      <a href="/about/disclaimer/">Disclaimer</a>
+      <a href="/about/terms/">Terms of Use</a>
+      <a href="/about/privacy/">Privacy</a>
+      <a href="/about/">About</a>
+      <a href="/about/team/">Team</a>
+    </div>
+    <div>
+      MegaPixels ©2017-19 Adam R. Harvey /
+      <a href="https://ahprojects.com">ahprojects.com</a>
+    </div>
+  </footer>
+</body>
+
+<script src="/assets/js/dist/index.js"></script>
+</html>
\ No newline at end of file
diff --git a/site/public/test/index.html b/site/public/test/index.html
index 41f8eda5..b4d16036 100644
--- a/site/public/test/index.html
+++ b/site/public/test/index.html
@@ -30,14 +30,14 @@
 <section><h1>Megapixels UI Tests</h1>
 <ul>
-<li><a href="/test/style/index.html">Style Guide</a></li>
-<li><a href="/test/csv/index.html">CSV</a></li>
-<li><a href="/test/datasets/index.html">Dataset list</a></li>
-<li><a href="/test/citations/index.html">Citation list</a></li>
-<li><a href="/test/map/index.html">Citation map</a></li>
-<li><a href="/test/face_search/index.html">Face search</a></li>
-<li><a href="/test/name_search/index.html">Name search</a></li>
-<li><a href="/test/gallery/index.html">Modal image gallery</a></li>
+<li><a href="/test/style">Style Guide</a></li>
+<li><a href="/test/csv">CSV</a></li>
+<li><a href="/test/datasets/">Dataset list</a></li>
+<li><a href="/test/citations/">Citation list</a></li>
+<li><a href="/test/map/">Citation map</a></li>
+<li><a href="/test/face_search/">Face search</a></li>
+<li><a href="/test/name_search/">Name search</a></li>
+<li><a href="/test/gallery/">Modal image gallery</a></li>
 </ul>
 </section>
diff --git a/site/public/test/style/index.html b/site/public/test/style/index.html
index f25f1daf..2a41b8b1 100644
--- a/site/public/test/style/index.html
+++ b/site/public/test/style/index.html
@@ -30,7 +30,7 @@
 <section><h1>Style Examples</h1>
 <h3><a href="/test/">← Back to test index</a></h3>
-</section><section class='fullwidth'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/test.jpg' alt='Style Guide Test'><div class='caption'>Style Guide Test</div></div></section><section><div class='meta'><div><div class='gray'>Date</div><div>17-Jan-2019</div></div><div><div class='gray'>Numbers</div><div>17</div></div><div><div class='gray'>Identities</div><div>12,139</div></div><div><div class='gray'>But also</div><div>This is a test of the stylesheet</div></div></div></section><section><h1>Header 1</h1>
+</section><section class='wide'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/test.jpg' alt='Style Guide Test'><div class='caption'>Style Guide Test</div></div></section><section><div class='meta'><div><div class='gray'>Date</div><div>17-Jan-2019</div></div><div><div class='gray'>Numbers</div><div>17</div></div><div><div class='gray'>Identities</div><div>12,139</div></div><div><div class='gray'>But also</div><div>This is a test of the stylesheet</div></div></div></section><section><h1>Header 1</h1>
 <h2>Header 2</h2>
 <h3>Header 3</h3>
 <h4>Header 4</h4>
@@ -53,17 +53,14 @@
 <div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/man.jpg' alt='Person 2'><div class='caption'>Person 2</div></div>
 <div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/man.jpg' alt='Person 3. Let me tell you about Person 3. This person has a very long description with text which wraps like crazy'><div class='caption'>Person 3. Let me tell you about Person 3. This person has a very long description with text which wraps like crazy</div></div></section><section><blockquote><p>est, qui dolorem ipsum, quia dolor sit amet consectetur adipisci[ng] velit, sed quia non-numquam [do] eius modi tempora inci[di]dunt, ut labore et dolore magnam aliquam quaerat voluptatem.</p>
 </blockquote>
-</section><section class='fullwidth'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/wide-test.jpg' alt='This image is extremely wide and the text beneath it will wrap but thats fine because it can also contain <a href="https://example.com/">hyperlinks</a>! Yes, you read that right—hyperlinks! Lorem ipsum dolor sit amet ad volotesque sic hoc ad nauseam'><div class='caption'>This image is extremely wide and the text beneath it will wrap but that's fine because it can also contain <a href="https://example.com/">hyperlinks</a>! Yes, you read that right—hyperlinks! Lorem ipsum dolor sit amet ad volotesque sic hoc ad nauseam</div></div></section><section><p>Inline <code>code</code> has <code>back-ticks around</code> it.</p>
+</section><section class='wide'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/wide-test.jpg' alt='This image is extremely wide and the text beneath it will wrap but thats fine because it can also contain <a href="https://example.com/">hyperlinks</a>! Yes, you read that right—hyperlinks! Lorem ipsum dolor sit amet ad volotesque sic hoc ad nauseam'><div class='caption'>This image is extremely wide and the text beneath it will wrap but that's fine because it can also contain <a href="https://example.com/">hyperlinks</a>! Yes, you read that right—hyperlinks! Lorem ipsum dolor sit amet ad volotesque sic hoc ad nauseam</div></div></section><section><p>Inline <code>code</code> has <code>back-ticks around</code> it.</p>
 </section><section><pre><code class="lang-javascript">var s = "JavaScript syntax highlighting";
 alert(s);
 </code></pre>
 </section><section><pre><code class="lang-python">s = "Python syntax highlighting"
 print(s)
 </code></pre>
-</section><section><pre><code class="lang-code">Generic code block. Note that code blocks that are not so marked will not appear.
-But let's throw in a <b>tag</b>.
-</code></pre>
-</section><section><p>Horizontal rule</p>
+</section><section class='applet_container'><div class='applet' data-payload='{"command": "No language indicated, so no syntax highlighting. ", "fields": ["But let's throw in a <b>tag</b>."]}'></div></section><section><p>Horizontal rule</p>
 <hr>
 <p>Citations below here</p>
 <div class="footnotes">
