28 files changed, 713 insertions, 260 deletions
@@ -22,7 +22,7 @@ pip install urllib3 flask flask_sqlalchemy mysql-connector pip install pymediainfo tqdm opencv-python imutils pip install scikit-image python-dotenv imagehash scikit-learn colorlog pip install celery keras tensorflow -pip install python.app # OSX only! +pip install python.app # OSX only! needed for matplotlib sudo apt-get install libmysqlclient-dev @@ -40,6 +40,8 @@ ALTER DATABASE megapixels CHARACTER SET = utf8mb4 COLLATE = utf8mb4_unicode_ci; ## Building the site +The most recently built copy of the site is kept in the repo. This is generated directly from NextCloud. Be mindful that NextCloud will create extra copies of things if there are merge conflicts. + ``` npm install npm run build @@ -52,9 +54,12 @@ python cli_site.py build ## Running the site +On OSX, you must run `pythonw` to use matplotlib. + ``` python cli_flask.py run python `which celery` worker -A app.server.tasks --loglevel=info -E redis-server /usr/local/etc/redis.conf npm run watch ``` + diff --git a/client/common/loader.component.js b/client/common/loader.component.js index 5930c63e..df25dd39 100644 --- a/client/common/loader.component.js +++ b/client/common/loader.component.js @@ -1,11 +1,18 @@ import React from 'react' +import Spinner from 'react-spin' export default function Loader() { - return ( - <div className='loaderWrapper'> - <div className='loader'> - <img src="/assets/img/loader.gif" /> - </div> - </div> - ) + const spinCfg = { + width: 5, + radius: 20, + color: 'white', + } + return <Spinner config={spinCfg} /> + // return ( + // <div className='loaderWrapper'> + // <div className='loader'> + // <img src="/assets/img/loader.gif" /> + // </div> + // </div> + // ) } diff --git a/client/faceSearch/faceSearch.query.js b/client/faceSearch/faceSearch.query.js index 2d140813..8a82373e 100644 --- a/client/faceSearch/faceSearch.query.js +++ b/client/faceSearch/faceSearch.query.js @@ -19,6 +19,11 @@ class FaceSearchQuery extends Component { } upload(blob) { + if (this.state.image) { + URL.revokeObjectURL(this.state.image) + } + const url = URL.createObjectURL(blob) + this.setState({ image: url }) this.props.actions.upload(this.props.payload, blob) } @@ -37,17 +42,17 @@ class FaceSearchQuery extends Component { <div className='uploadContainer'> <div style={style}> {image ? 
null : <img src="/assets/img/icon_camera.svg" />} + {result.loading && ( + <div className='loading' style={style}> + <Loader /> + </div> + )} <UploadImage onUpload={this.upload.bind(this)} /> </div> - {result.loading && ( - <div className='loading' style={style}> - <Loader /> - </div> - )} </div> <div className='cta'> <h2>Search by Image</h2> - <h3>Searching {13456} images</h3> + <h3>Searching {13233} images</h3> <p> {'Use facial recognition to reverse search into the LFW dataset '} {'and see if it contains your photos.'} diff --git a/client/faceSearch/faceSearch.result.js b/client/faceSearch/faceSearch.result.js index c2509033..00dec160 100644 --- a/client/faceSearch/faceSearch.result.js +++ b/client/faceSearch/faceSearch.result.js @@ -49,7 +49,6 @@ class FaceSearchResult extends Component { return ( <div className='result'> <div> - <Loader /><br /> <h2>Searching...</h2> </div> </div> diff --git a/client/index.js b/client/index.js index 96f2c8c8..c9335f14 100644 --- a/client/index.js +++ b/client/index.js @@ -28,7 +28,7 @@ function fetchDataset(payload) { function appendApplets(applets) { applets.forEach(([el, payload]) => { - el.classList.add(payload.cmd) + el.parentNode.classList.add(payload.cmd) switch (payload.cmd) { case 'citations': case 'load_file': @@ -36,7 +36,7 @@ function appendApplets(applets) { appendTable(el, payload) break case 'map': - el.parentNode.classList.add('fullwidth') + el.parentNode.classList.add('wide') appendMap(el, payload) el.classList.add('loaded') break diff --git a/client/map/index.js b/client/map/index.js index 053cf13b..e8543c77 100644 --- a/client/map/index.js +++ b/client/map/index.js @@ -33,6 +33,7 @@ function addMarker(map, latlng, title, subtext) { "<br>", subtext, ].join('')) + return marker } function addArc(map, src, dest) { @@ -74,5 +75,6 @@ export default function append(el, payload) { addArc(map, source, latlng) }) - addMarker(map, source, paper.title, paper.address) + const rootMarker = addMarker(map, source, paper.title, paper.address) + rootMarker.openPopup() } diff --git a/client/nameSearch/nameSearch.query.js b/client/nameSearch/nameSearch.query.js index 99c1da84..c0798c58 100644 --- a/client/nameSearch/nameSearch.query.js +++ b/client/nameSearch/nameSearch.query.js @@ -20,7 +20,7 @@ class NameSearchQuery extends Component { return ( <div className='query'> <h2>Search by Name</h2> - <h3>Searching {13456} identities</h3> + <h3>Searching {13233} identities</h3> <p> {'Enter your name to see if you were included in this dataset..'} </p> diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py index b3bce9bc..81fffdd8 100644 --- a/megapixels/app/server/api.py +++ b/megapixels/app/server/api.py @@ -7,7 +7,7 @@ import operator from flask import Blueprint, request, jsonify from PIL import Image # todo: try to remove PIL dependency -from app.processors import face_recognition +from app.processors import face_extractor from app.processors import face_detector from app.processors.faiss import load_faiss_databases from app.models.sql_factory import load_sql_datasets, list_datasets, get_dataset, get_table @@ -66,28 +66,33 @@ def upload(dataset_name): detector = face_detector.DetectorDLIBHOG() # get detection as BBox object - bboxes = detector.detect(im_np, largest=True) + bboxes = detector.detect(im_np, largest=True, pyramids=2) + if not bboxes or not len(bboxes): return jsonify({ 'error': 'bbox' }) - bbox = bboxes[0] - if not bbox: + + bbox_norm = bboxes[0] + if not bbox_norm: return jsonify({ 'error': 'bbox' }) dim = 
im_np.shape[:2][::-1] - bbox = bbox.to_dim(dim) # convert back to real dimensions + bbox_dim = bbox_norm.to_dim(dim) + # bbox = bbox.to_dim(dim) # convert back to real dimensions # print("got bbox") - if not bbox: + if not bbox_dim: return jsonify({ 'error': 'bbox' }) # extract 128-D vector - recognition = face_recognition.RecognitionDLIB(gpu=-1) - vec = recognition.vec(im_np, bbox) + extractor = face_extractor.ExtractorDLIB() + vec = extractor.extract(im_np, bbox_norm) # NB use norm, not bbox_dim + # recognition = face_recognition.RecognitionDLIB(gpu=-1) + # vec = recognition.vec(im_np, bbox) query = np.array([ vec ]).astype('float32') # query FAISS @@ -122,7 +127,7 @@ def upload(dataset_name): query = { 'timing': round(time.time() - start, 3), - 'bbox': str(bbox), + 'bbox': str(bbox_norm), } # print(results) return jsonify({ diff --git a/megapixels/app/server/tasks/demo.py b/megapixels/app/server/tasks/demo.py index f7db9034..12d83383 100644 --- a/megapixels/app/server/tasks/demo.py +++ b/megapixels/app/server/tasks/demo.py @@ -81,7 +81,7 @@ def demo_task(self, uuid_name, fn): face_detector_instance = face_detector.DetectorDLIBCNN(gpu=opt_gpu) # -1 for CPU step('Detecting face') st = time.time() - bboxes = face_detector_instance.detect(im_resized, largest=True) + bboxes = face_detector_instance.detect(im_resized, largest=True, pyramids=2) bbox = bboxes[0] dim = im_resized.shape[:2][::-1] bbox_dim = bbox.to_dim(dim) diff --git a/package-lock.json b/package-lock.json index 60a74ece..72d44774 100644 --- a/package-lock.json +++ b/package-lock.json @@ -7163,6 +7163,14 @@ } } }, + "react-spin": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/react-spin/-/react-spin-0.6.2.tgz", + "integrity": "sha1-56+crRWLjGAdg6qtXbSr0b9CuMk=", + "requires": { + "spin.js": "^2.0.1" + } + }, "read-pkg": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-2.0.0.tgz", @@ -8419,6 +8427,11 @@ } } }, + "spin.js": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/spin.js/-/spin.js-2.3.2.tgz", + "integrity": "sha1-bKpW1SBnNFD9XPvGlx5tB3LDeho=" + }, "split-string": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", diff --git a/package.json b/package.json index fcabb7e1..d007cf2e 100644 --- a/package.json +++ b/package.json @@ -46,6 +46,7 @@ "react-dom": "^16.3.0", "react-hot-loader": "^4.3.0", "react-redux": "^5.0.7", + "react-spin": "^0.6.2", "redux": "^4.0.0", "redux-thunk": "^2.3.0", "snapsvg": "^0.5.1", diff --git a/site/assets/css/applets.css b/site/assets/css/applets.css index 0c566a9f..f437d1e8 100644 --- a/site/assets/css/applets.css +++ b/site/assets/css/applets.css @@ -1,3 +1,6 @@ +.applet_container { + min-height: 340px; +} .applet { margin-bottom: 40px; transition: opacity 0.2s cubic-bezier(0,0,1,1); @@ -28,6 +31,25 @@ /* search results */ +.name_search, .face_search { + box-shadow: inset 0 0 40px #000; + background: #111; + padding: 20px 0; + /*background: black;*/ + width: 100%; +} +.name_search { + margin-top: 0px; + margin-bottom: 20px; +} +.face_search .applet { + max-width: 640px; + margin: 0 auto; +} +.name_search .applet { + max-width: 640px; + margin: 0 auto; +} .results { margin-top: 10px; padding-bottom: 10px; @@ -54,6 +76,9 @@ .results > div:nth-child(3n+1) { margin-left: 0; } +.applet h2 { + font-size: 20pt; +} .query h2 { margin-top: 0; padding-top: 0; } @@ -107,6 +132,12 @@ max-width: 40px; } +.map, .map .applet { + height: 500px; +} +.map { + margin-bottom: 20px; +} /* 
tabulator */ diff --git a/site/assets/css/css.css b/site/assets/css/css.css index 8239cfc7..50958427 100644 --- a/site/assets/css/css.css +++ b/site/assets/css/css.css @@ -6,6 +6,7 @@ html, body { min-height: 100%; font-family: 'Roboto', sans-serif; color: #b8b8b8; + overflow-x: hidden; } html { background: #191919; @@ -376,11 +377,17 @@ section.fullwidth .image { /* home page */ .hero { - position: relative; width: 100%; - max-width: 1200px; + background: black; + background: linear-gradient(#000,#222); height: 50vw; max-height: 70vh; +} +.hero .inner { + position: relative; + width: 100%; + max-width: 1200px; + height: 100%; display: flex; align-items: center; margin: 0 auto; @@ -388,13 +395,15 @@ section.fullwidth .image { #face_container { pointer-events: none; position: absolute; - width: 50vw; + width: 66vw; height: 50vw; max-height: 70vh; top: 0; - right: 0; - z-index: -1; + right: -16vw; + z-index: 0; text-align: center; + perspective: 500px; + perspective-origin: 50% 80%; } .currentFace { position: absolute; @@ -402,6 +411,7 @@ section.fullwidth .image { width: 100%; left: 0; text-align: center; + font-size: 26px; } .intro { max-width: 640px; @@ -451,7 +461,12 @@ section.fullwidth .image { .desktop .intro .under a:hover { color: #fff; } - +.dataset-intro h2 { + margin-top: 40px; +} +.content .dataset-intro .first_paragraph { + margin-top: 10px; +} /* intro - list of datasets */ .dataset-list { diff --git a/site/assets/demo/cloud/.gitignore b/site/assets/demo/cloud/.gitignore new file mode 100644 index 00000000..2ccbe465 --- /dev/null +++ b/site/assets/demo/cloud/.gitignore @@ -0,0 +1 @@ +/node_modules/ diff --git a/site/assets/demo/cloud/.npmignore b/site/assets/demo/cloud/.npmignore new file mode 100644 index 00000000..dcaf4a28 --- /dev/null +++ b/site/assets/demo/cloud/.npmignore @@ -0,0 +1,4 @@ +/demo/ +/index.html +/rollup.config.js +/src/ diff --git a/site/assets/demo/cloud/LICENSE b/site/assets/demo/cloud/LICENSE new file mode 100644 index 00000000..edeba37c --- /dev/null +++ b/site/assets/demo/cloud/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017-2018 Sergej Sintschilin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/site/assets/demo/cloud/README.md b/site/assets/demo/cloud/README.md new file mode 100644 index 00000000..de6e882e --- /dev/null +++ b/site/assets/demo/cloud/README.md @@ -0,0 +1,109 @@ +# THREE.TextSprite + +`class THREE.TextSprite extends THREE.Sprite` + +An instance of `TextSprite` automatically computes the optimal font size depending on the distance to the camera and the size of the renderer canvas. + +## demo + +[Try it out!](https://seregpie.github.io/THREE.TextSprite/) + +## dependencies + +- [THREE.TextTexture](https://github.com/SeregPie/THREE.TextTexture) + +## setup + +### npm + +```shell +npm install three.textsprite +``` + +### ES module + +```javascript +import TextSprite from 'three.textsprite'; +``` + +### browser + +```html +<script src="https://unpkg.com/three"></script> +<script src="https://unpkg.com/three.texttexture"></script> +<script src="https://unpkg.com/three.textsprite"></script> +``` + +The class `TextSprite` will be available under the namespace `THREE`. + +## members + +``` +.constructor({ + material, + maxFontSize, + minFontSize, + redrawInterval, + textSize, + texture, +}) +``` + +| argument | description | +| ---: | :--- | +| `material` | The parameters to pass to the constructor of [`SpriteMaterial`](https://threejs.org/docs/index.html#api/materials/SpriteMaterial). | +| `texture` | The parameters to pass to the constructor of [`TextTexture`](https://github.com/SeregPie/THREE.TextTexture). | + +```javascript +let sprite = new THREE.TextSprite({ + material: { + color: 0xffbbff, + fog: true, + }, + redrawInterval: 250, + textSize: 10, + texture: { + text: 'Carpe Diem', + fontFamily: 'Arial, Helvetica, sans-serif', + }, +}); +scene.add(sprite); +``` + +--- + +`.isTextSprite = true` + +Used to check whether this is an instance of `TextSprite`. + +You should not change this, as it is used internally for optimisation. + +--- + +`.textSize = 1` + +The size of the text. + +--- + +`.redrawInterval = 1` + +The minimum time that must elapse before the canvas is redrawn. If 0, the canvas is redrawn immediately whenever `TextSprite` is rendered, otherwise the redrawing is deferred. + +--- + +`.minFontSize = 0` + +The minimum font size. + +--- + +`.maxFontSize = Infinity` + +The maximum font size. + +--- + +`.dispose()` + +Disposes the texture and the material. 
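For reference, the font-size computation this README describes is easiest to follow unminified; the sketch below condenses the logic from src/getOptimalFontSize.js and redrawNow(), both of which appear later in this diff:

```javascript
// Condensed from src/getOptimalFontSize.js (below): project the
// sprite's world-space height into canvas pixels, then convert
// to a font size using the texture's line height.
function optimalFontSize(sprite, renderer, camera) {
  const spriteWorld = sprite.getWorldPosition(new THREE.Vector3());
  const cameraWorld = camera.getWorldPosition(new THREE.Vector3());
  const distance = spriteWorld.distanceTo(cameraWorld);
  if (!distance) return 0;
  const heightInPixels = sprite.getWorldScale(new THREE.Vector3()).y
    * renderer.domElement.height / distance;
  return Math.round(heightInPixels / sprite.material.map.imageHeight);
}

// redrawNow() then snaps and clamps the result:
// fontSize = clamp(ceilPowerOfTwo(optimalFontSize(...)), minFontSize, maxFontSize)
```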
diff --git a/site/assets/demo/cloud/THREE.TextSprite.js b/site/assets/demo/cloud/THREE.TextSprite.js new file mode 100644 index 00000000..525d22cd --- /dev/null +++ b/site/assets/demo/cloud/THREE.TextSprite.js @@ -0,0 +1 @@ +(function(a,b){"object"==typeof exports&&"undefined"!=typeof module?module.exports=b(require("three"),require("three.texttexture")):"function"==typeof define&&define.amd?define(["three","three.texttexture"],b):(a.THREE=a.THREE||{},a.THREE.TextSprite=b(a.THREE,a.THREE.TextTexture))})(this,function(a,b){"use strict";function c(a,b,c){var g=Math.round;if(b.domElement.width&&b.domElement.height&&a.material.map.textLines.length){var h=a.getWorldPosition(d).distanceTo(c.getWorldPosition(e));if(h){var i=a.getWorldScale(f).y*b.domElement.height/h;if(i)return g(i/a.material.map.imageHeight)}}return 0}b=b&&b.hasOwnProperty("default")?b["default"]:b;var d=new a.Vector3,e=new a.Vector3,f=new a.Vector3,g=function(d){function e(c){void 0===c&&(c={});var e=c.textSize;void 0===e&&(e=1);var f=c.redrawInterval;void 0===f&&(f=1);var g=c.minFontSize;void 0===g&&(g=0);var h=c.maxFontSize;void 0===h&&(h=1/0);var i=c.material;void 0===i&&(i={});var j=c.texture;void 0===j&&(j={}),d.call(this,new a.SpriteMaterial(Object.assign({},i,{map:new b(j)}))),this.textSize=e,this.redrawInterval=f,this.minFontSize=g,this.maxFontSize=h,this.lastRedraw=0}d&&(e.__proto__=d),e.prototype=Object.create(d&&d.prototype),e.prototype.constructor=e;var f={isTextSprite:{configurable:!0}};return f.isTextSprite.get=function(){return!0},e.prototype.onBeforeRender=function(a,b,c){this.redraw(a,c)},e.prototype.updateScale=function(){this.scale.set(this.material.map.imageAspect,1,1).multiplyScalar(this.textSize*this.material.map.imageHeight)},e.prototype.updateMatrix=function(){for(var a=[],b=arguments.length;b--;)a[b]=arguments[b];return this.updateScale(),d.prototype.updateMatrix.apply(this,a)},e.prototype.redraw=function(a,b){var c=this;this.lastRedraw+this.redrawInterval<Date.now()&&(this.redrawInterval?setTimeout(function(){c.redrawNow(a,b)},1):this.redrawNow(a,b))},e.prototype.redrawNow=function(b,d){this.updateScale(),this.material.map.autoRedraw=!0,this.material.map.fontSize=a.Math.clamp(a.Math.ceilPowerOfTwo(c(this,b,d)),this.minFontSize,this.maxFontSize),this.lastRedraw=Date.now()},e.prototype.dispose=function(){this.material.map.dispose(),this.material.dispose()},Object.defineProperties(e.prototype,f),e}(a.Sprite);return g}); diff --git a/site/assets/demo/cloud/demo/script.js b/site/assets/demo/cloud/demo/script.js new file mode 100644 index 00000000..a6fc540a --- /dev/null +++ b/site/assets/demo/cloud/demo/script.js @@ -0,0 +1,129 @@ +/* eslint-disable */ +(function() { + + var datasets = ["10K US Adult Faces","3D-RMA","3D Dynamic","3DPeS","4DFAB","50 People One Question","aPascal","Aberdeen","Adience","AFAD","AFEW-VA","AffectNet","AFLW","AFW","AgeDB","ALERT Airport","AM-FED","APiS1.0","AR Face","AWE Ears","B3D(AC)","BBC Pose","BPAD","BFM","BioID Face","BJUT-3D","The Bosphorus","BP4D+","BP4D-Spontanous","Brainwash","BU-3DFE","BUHMAP-DB ","CAFE","Caltech 10K Web Faces","Caltech Faces","Caltech Pedestrians","CAMEL","CAS-PEAL","Casablanca","CASIA Webface","CAVIAR4REID","CelebA","CelebFaces+","CFD","ChaLearn","ChokePoint","Cityscapes","CCP","CMDP","CMU PIE","COCO","COCO-a","COCO QA","COFW","CK","CK+","Columbia Gaze","Ongoing Complex Activities","CUHK01","CUHK02","CUHK03","CVC-01","UFI","D3DFACS","Dartmouth Children","Data61 Pedestrian","DeepFashion","DISFA","Long Distance Heterogeneous Face","Duke MTMC","EmotioNet 
Database","ETHZ Pedestrian","EuroCity Persons","ExpW","Face Research Lab London","FaceScrub","FaceTracer","SFC","Facebook100","Face Place","Faces94","Faces95","Faces96","FIW","FDDB","FEI","FERET","FER+","CMU FiA","300-W","Florida Inmate","FRAV2D","FRAV3D","GRIMACE","FRGC","Gallagher","Gavab","GeoFaces","Georgia Tech Face","Google Makeup","Google (private)","Graz Pedestrian","H3D","HDA+","Helen","Hi4D-ADSIP","HID","Hipsterwars","HollywoodHeads","HRT Transgender","IFAD","IFDB","IIT Dehli Ear","IJB-A","IJB-B","IJB-C","","iLIDS-VID","Images of Groups","IMDB","IMFDB","IMM Face Dataset","Immediacy","imSitu","INRIA Pedestrian","iQIYI-VID dataset ","JAFFE","Jiku Mobile Video Dataset","JPL-Interaction dataset","Karpathy Instagram","KDEF","UB KinFace","KinectFaceDB","KITTI","LAG","Large Scale Person Search","Leeds Sports Pose","Leeds Sports Pose Extended","LFW","LFW-a","LFWP","m2vts","xm2vtsdb","MAFL","MALF","Mapillary","Market 1501","Market 1203","MARS","McGill Real World","Multiple Encounter Dataset","MegaAge","MegaFace","MIFS","MIKKI dataset","MIT CBCL","CBCL","CBCLSS","MIW","MMI Facial Expression Dataset","Moments in Time","MORPH Commercial","MORPH Non-Commercial","MOT","Large MPI Facial Expression","Small MPI Facial Expression","MPIIGaze","MPII Human Pose","MR2","MRP Drone","MsCeleb","MSMT17","MUCT","MUG Faces","MULTIPIE","MTFL","News Dataset","ND-2006","MID","Novaemötions Dataset","Nude Detection","ORL","Penn Fudan","PETA","PETS 2017","PPB","PIPA","PKU","PKU-Reid","Pornography DB","Precarious","PRID","PRW","PSU","PubFig","pubfig83","Put Face","GRID","QMUL-iLIDS","QMUL-SurvFace","RaFD","RAiD","RAP","ReSEED","SAIVT SoftBio","SAMM","Sarc3D","SCface","SCUT-FBP","SCUT HEAD","SDU-VID","SED Dataset","Sheffield Face","Shinpuhkan 2014","Social Relation","SOTON HiD","SVW","STAIR Action","Stanford Drone","Buffy Stickmen","We Are Family Stickmen","Stickmen PASCAL","Stirling/ESRC 3D Face","SUN","SVS","Texas 3DFRD","TinyFace","Tiny Images","TownCenter","TUD-Brussels","TUD-Campus","TUD-Crossing","TUD-Motionparis","TUD-Multiview","TUD-Pedestrian","TUD-Stadtmitte","TVHI","ND-TWINS-2009-2010","UCCS","UCF101","UCF-CC-50","UCF Selfie","UFDD","UMB","UMD","UNBC-McMaster Pain","Urban Tribes","USED Social Event Dataset","UTKFace","V47","VADANA","CIP","VGG Face","VGG Face2","Violent Flows","VIPeR","Phrasal Recognition","VMU","VOC","VQA","WARD","WGT","WIDER","WIDER FACE","WIDER Attribute","WildTrack","YaleFaces","Yale Face Database B","Extended Yale Face Database B ","YawDD","YFCC100M","UOY 3D Face Database","YouTubeFaces","YMU","YouTube Pose","WLFDB","SAL","Semaine","Belfast naturalistic","Belfast induced","VAM-faces","MAHNOB-HCI","DEAP","AMFED","Recola","AVEC13","AVEC14","Mimicry","Meissner Caucasian and African American","Nottingham Scans","Nottingham Originals","Stirling Pain","Utrecht ECVP","Mooney","Visual Commonsense Reasoning","HUFRD Pilgrims Dataset"] + var getRandomText = function() { + return chance.pickone(datasets) + }; + var getRandomFontFamily = function() { + return 'Helvetica, Arial, sans-serif' + // return chance.pickone([ + // 'Georgia, serif', + // '"Palatino Linotype", "Book Antiqua", Palatino, serif', + // '"Times New Roman", Times, serif', + // 'Helvetica, Arial, sans-serif', + // '"Arial Black", Gadget, sans-serif', + // '"Comic Sans MS", cursive, sans-serif', + // 'Impact, Charcoal, sans-serif', + // '"Lucida Sans Unicode", "Lucida Grande", sans-serif', + // 'Tahoma, Geneva, sans-serif', + // '"Trebuchet MS", Helvetica, sans-serif', + // 'Verdana, Geneva, sans-serif', + // '"Courier New", 
Courier, monospace', + // '"Lucida Console", Monaco, monospace', + // ]); + }; + var getRandomColor = function() { + return chance.pickone([ 0xffffff, 0xffffff, 0xffffff, 0xdddde6, 0x888896 ]) // chance.color({format: 'hex'}); + }; + var getRandomTextSize = function() { + return (1 + Math.random()) * 1/8; + }; + var renderer = new THREE.WebGLRenderer({antialias: true, alpha: true}); + renderer.setPixelRatio(devicePixelRatio); + renderer.setClearColor(0x000000, 0); + document.body.appendChild(renderer.domElement); + var scene = new THREE.Scene(); + var camera = new THREE.PerspectiveCamera(75, 1, 1/128, 128); + camera.position.set(4, 0, 10); + var redrawInterval = 1; + var sprites = Array.from({length: datasets.length}, function(t, i) { + // console.log(i) + var sprite = new THREE.TextSprite({ + textSize: getRandomTextSize(), + redrawInterval: redrawInterval, + material: { + color: getRandomColor(), + }, + texture: { + text: datasets[i], + fontFamily: getRandomFontFamily(), + }, + }); + sprite.position + .setX(Math.random()) + .setY(Math.random()) + .setZ(Math.random()) + .subScalar(1/2) + .setLength(1 + Math.random()) + .multiplyScalar(6); + scene.add(sprite); + return sprite; + }); + var controls = new THREE.OrbitControls(camera, renderer.domElement); + controls.maxDistance = camera.far/2; + controls.enableDamping = true; + controls.dampingFactor = 1/8; + controls.rotateSpeed = 1/4; + controls.zoomSpeed = 1; + controls.keyPanSpeed = 1/2; + var renderScene = function() { + renderer.setSize(document.body.offsetWidth, document.body.offsetHeight); + camera.aspect = renderer.domElement.width / renderer.domElement.height; + camera.updateProjectionMatrix(); + // controls.update(); + camera.position.z += -0.0025 + camera.rotation.y += 0.00001 + renderer.render(scene, camera); + }; + window.addEventListener('resize', renderScene, false); + var startSceneRenderer = function() { + requestAnimationFrame(function() { + setTimeout(startSceneRenderer, 1000/60); + }); + renderScene(); + }; + startSceneRenderer(); + // var gui = new dat.GUI(); + // (function() { + // var guiFolder = gui.addFolder('texture'); + // guiFolder.add({ + // text: function() { + // sprites.forEach(function(sprite) { + // sprite.material.map.text = getRandomText(); + // }); + // }, + // }, 'text'); + // guiFolder.add({ + // fontFamily: function() { + // sprites.forEach(function(sprite) { + // sprite.material.map.fontFamily = getRandomFontFamily(); + // }); + // }, + // }, 'fontFamily'); + // guiFolder.open(); + // })(); + // (function() { + // var guiFolder = gui.addFolder('sprite'); + // guiFolder.add({ + // textSize: function() { + // sprites.forEach(function(sprite) { + // sprite.textSize = getRandomTextSize(); + // }); + // }, + // }, 'textSize'); + // guiFolder.add(Object.defineProperty({}, 'redrawInterval', { + // get: function() { + // return redrawInterval; + // }, + // set: function(value) { + // redrawInterval = value; + // sprites.forEach(function(sprite) { + // sprite.redrawInterval = redrawInterval; + // }); + // }, + // }), 'redrawInterval', 0, 2000, 1); + // guiFolder.open(); + // })(); + +})(); diff --git a/site/assets/demo/cloud/index.html b/site/assets/demo/cloud/index.html new file mode 100644 index 00000000..168a9467 --- /dev/null +++ b/site/assets/demo/cloud/index.html @@ -0,0 +1,31 @@ +<!DOCTYPE html> +<html style="height: 100%;"> + <head> + <meta charset="utf-8"/> + <meta + content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=no" + name="viewport" + /> + <title>Megapixels 
Datasets</title> + <script src="https://cdnjs.cloudflare.com/ajax/libs/babel-polyfill/7.0.0/polyfill.min.js"></script> + <script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/97/three.min.js"></script> + <script src="https://unpkg.com/three.texttexture@18.10.24"></script> + <script src="THREE.TextSprite.js"></script> + <link + href="https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.0/normalize.min.css" + rel="stylesheet" + /> + <script src="https://cdn.rawgit.com/mrdoob/three.js/dev/examples/js/controls/OrbitControls.js"></script> + <script src="https://cdnjs.cloudflare.com/ajax/libs/chance/1.0.16/chance.min.js"></script> + </head> + <body + style=" + background: linear-gradient(#333,#001); + font-family: sans-serif; + height: 100%; + overflow: hidden; + " + > + <script src="demo/script.js"></script> + </body> +</html> diff --git a/site/assets/demo/cloud/package.json b/site/assets/demo/cloud/package.json new file mode 100644 index 00000000..f7556104 --- /dev/null +++ b/site/assets/demo/cloud/package.json @@ -0,0 +1,40 @@ +{ + "name": "three.textsprite", + "version": "18.10.24", + "description": "Automatically computes the optimal font size depending on the distance to the camera and the size of the renderer canvas.", + "keywords": [ + "3d", + "canvas", + "class", + "font", + "group", + "object", + "plugin", + "resolution", + "scale", + "size", + "text", + "texture", + "three" + ], + "license": "MIT", + "author": "Sergej Sintschilin <seregpie@gmail.com>", + "main": "THREE.TextSprite.js", + "repository": "https://github.com/SeregPie/THREE.TextSprite.git", + "scripts": { + "build": "rollup -c", + "dev": "rollup -c -w", + "prepublishOnly": "npm run build" + }, + "dependencies": { + "three.texttexture": "^18.10.24" + }, + "devDependencies": { + "rollup": "^0.66.6", + "rollup-plugin-babel-minify": "^6.1.1", + "rollup-plugin-buble": "^0.19.4" + }, + "peerDependencies": { + "three": "^0.97.0" + } +} diff --git a/site/assets/demo/cloud/rollup.config.js b/site/assets/demo/cloud/rollup.config.js new file mode 100644 index 00000000..57415169 --- /dev/null +++ b/site/assets/demo/cloud/rollup.config.js @@ -0,0 +1,25 @@ +import buble from 'rollup-plugin-buble'; +import minify from 'rollup-plugin-babel-minify'; +import path from 'path'; + +import {main} from './package.json'; + +let globals = { + 'three': 'THREE', + 'three.texttexture': 'THREE.TextTexture', +}; + +export default { + input: 'src/index.js', + external: Object.keys(globals), + output: { + file: main, + format: 'umd', + name: path.basename(main, path.extname(main)), + globals, + }, + plugins: [ + buble({objectAssign: 'Object.assign'}), + minify({comments: false}), + ], +}; diff --git a/site/assets/demo/cloud/src/getOptimalFontSize.js b/site/assets/demo/cloud/src/getOptimalFontSize.js new file mode 100644 index 00000000..02787516 --- /dev/null +++ b/site/assets/demo/cloud/src/getOptimalFontSize.js @@ -0,0 +1,18 @@ +import {Vector3} from 'three'; + +let objectWorldPosition = new Vector3(); +let cameraWorldPosition = new Vector3(); +let objectWorldScale = new Vector3(); + +export default function(object, renderer, camera) { + if (renderer.domElement.width && renderer.domElement.height && object.material.map.textLines.length) { + let distance = object.getWorldPosition(objectWorldPosition).distanceTo(camera.getWorldPosition(cameraWorldPosition)); + if (distance) { + let heightInPixels = object.getWorldScale(objectWorldScale).y * renderer.domElement.height / distance; + if (heightInPixels) { + return Math.round(heightInPixels / 
object.material.map.imageHeight); + } + } + } + return 0; +} diff --git a/site/assets/demo/cloud/src/index.js b/site/assets/demo/cloud/src/index.js new file mode 100644 index 00000000..270891d5 --- /dev/null +++ b/site/assets/demo/cloud/src/index.js @@ -0,0 +1,78 @@ +import { + Math as THREE_Math, + Sprite, + SpriteMaterial, +} from 'three'; +import TextTexture from 'three.texttexture'; + +import getOptimalFontSize from './getOptimalFontSize'; + +export default class extends Sprite { + constructor({ + textSize = 1, + redrawInterval = 1, + minFontSize = 0, + maxFontSize = Infinity, + material = {}, + texture = {}, + } = {}) { + super(new SpriteMaterial({ + ...material, + map: new TextTexture(texture), + })); + this.textSize = textSize; + this.redrawInterval = redrawInterval; + this.minFontSize = minFontSize; + this.maxFontSize = maxFontSize; + this.lastRedraw = 0; + } + + get isTextSprite() { + return true; + } + + onBeforeRender(renderer, scene, camera) { + this.redraw(renderer, camera); + } + + updateScale() { + this.scale + .set(this.material.map.imageAspect, 1, 1) + .multiplyScalar(this.textSize * this.material.map.imageHeight); + } + + updateMatrix(...args) { + this.updateScale(); + return super.updateMatrix(...args); + } + + redraw(renderer, camera) { + if (this.lastRedraw + this.redrawInterval < Date.now()) { + if (this.redrawInterval) { + setTimeout(() => { + this.redrawNow(renderer, camera); + }, 1); + } else { + this.redrawNow(renderer, camera); + } + } + } + + redrawNow(renderer, camera) { + this.updateScale(); + this.material.map.autoRedraw = true; + this.material.map.fontSize = THREE_Math.clamp( + THREE_Math.ceilPowerOfTwo( + getOptimalFontSize(this, renderer, camera) + ), + this.minFontSize, + this.maxFontSize, + ); + this.lastRedraw = Date.now(); + } + + dispose() { + this.material.map.dispose(); + this.material.dispose(); + } +} diff --git a/site/assets/js/app/face.js b/site/assets/js/app/face.js index f3f1f2bf..0a87d2b2 100644 --- a/site/assets/js/app/face.js +++ b/site/assets/js/app/face.js @@ -1,4 +1,5 @@ -var face = (function(){ +/* eslint-disable */ +var faceInit = function () { var container = document.querySelector("#face_container") var camera, controls, scene, renderer var mouse = new THREE.Vector2(0.5, 0.5) @@ -8,6 +9,7 @@ var face = (function(){ var SWAP_TIME = 500 var cubes = [], meshes = [] var currentFace = document.querySelector('.currentFace') + var introEl = document.querySelector('.intro') var faceBuffer = (function () { var a = new Array(FACE_POINT_COUNT) for (let i = 0; i < FACE_POINT_COUNT; i++) { @@ -15,7 +17,9 @@ var face = (function(){ } return a })() + var lastSprite var last_t = 0, start_t = 0 + var bgColor = 0x000000 // 0x191919 var colors = [ 0xff3333, 0xff8833, @@ -32,32 +36,33 @@ var face = (function(){ function init() { fetch("/assets/data/3dlm_0_10.json") - .then(req => req.json()) - .then(data => { - face_names = Object.keys(data) - faces = face_names.map(name => recenter(data[name])) - setup() - build(faces[0]) - updateFace(faces[0]) - setCurrentFace(face_names[0]) - swapTo = faces[0] - animate() - }) + .then(req => req.json()) + .then(data => { + face_names = Object.keys(data) + faces = face_names.map(name => recenter(data[name])) + setup() + build(faces[0]) + updateFace(faces[0]) + setCurrentFace(face_names[0]) + swapTo = faces[0] + animate() + }) } function setup() { - var w = window.innerWidth / 2 + var w = window.innerWidth * 2/3 var h = Math.min(window.innerWidth / 2, window.innerHeight * 0.7) camera = new THREE.PerspectiveCamera(70, 
w/h, 1, 10000) camera.position.x = 0 camera.position.y = 0 - camera.position.z = 250 + camera.position.z = 200 scene = new THREE.Scene() - scene.background = new THREE.Color(0x191919) + // scene.background = new THREE.Color(bgColor) - renderer = new THREE.WebGLRenderer({ antialias: true }) + renderer = new THREE.WebGLRenderer({ antialias: true, alpha: true }) renderer.setPixelRatio(window.devicePixelRatio) renderer.setSize(w, h) + renderer.setClearColor(0x000000, 0); container.appendChild(renderer.domElement) document.body.addEventListener('mousemove', onMouseMove) // renderer.domElement.addEventListener('mousedown', swap) @@ -75,7 +80,7 @@ var face = (function(){ // console.log("done") // } // }) - setInterval(swap, 5000) + swap() } function build(points) { var matrix = new THREE.Matrix4() @@ -125,26 +130,47 @@ var face = (function(){ return (b-a) * n + a } function swap(){ - if (swapping) return - start_t = last_t - swapping = true swap_count = (swap_count + 1) % faces.length - swapFrom = swapTo + swapFrom = swapTo || faces[0] swapTo = faces[swap_count] setCurrentFace(face_names[swap_count]) + oktween.add({ + from: { n: 0 }, + to: { n: 1 }, + duration: 1000, + easing: oktween.easing.quad_in_out, + update: function(obj){ + lerpPoints(obj.n, swapFrom, swapTo, faceBuffer) + updateFace(faceBuffer) + }, + finished: function(){ + setTimeout(swap, 2000) + } + }) } function setCurrentFace(name) { name = name.replace('.png', '').split('_').filter(s => !s.match(/\d+/)).join(' ') currentFace.innerHTML = name - } - function update_swap(t){ - var n = (t - start_t) / SWAP_TIME - if (n > 1) { - swapping = false - n = 1 - } - lerpPoints(n, swapFrom, swapTo, faceBuffer) - updateFace(faceBuffer) + // if (lastSprite) { + // scene.remove(lastSprite) + // } + // var sprite = new THREE.TextSprite({ + // textSize: 12, + // redrawInterval: 1000, + // material: { + // color: 0xcccccc, + // }, + // texture: { + // text: name, + // fontFamily: '"Roboto", "Helvetica", sans-serif', + // }, + // }); + // sprite.position + // .setX(0) + // .setY(0) + // .setZ(0) + // scene.add(sprite); + // lastSprite = sprite } function updateFace(points) { updateCubeGeometry(points) @@ -214,14 +240,15 @@ var face = (function(){ } function animate(t) { requestAnimationFrame(animate) - if (swapping) update_swap(t) renderer.render(scene, camera) // scene.rotation.y += 0.01 * Math.PI mouseTarget.x += (mouse.x - mouseTarget.x) * 0.1 mouseTarget.y += (mouse.y - mouseTarget.y) * 0.1 scene.rotation.x = (mouseTarget.y - 0.5) * Math.PI / 2 scene.rotation.y = (mouseTarget.x - 0.5) * Math.PI + currentFace.style.transform = "translateZ(0) rotateY(" + (scene.rotation.y/2) + "rad)" // scene.rotation.y += 0.01 last_t = t } -})() +} +faceInit()
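The oktween update callback above relies on lerpPoints, which this hunk does not show; a hypothetical sketch, assuming each face is an array of THREE.Vector3 points like faceBuffer (the real helper lives elsewhere in face.js):

```javascript
// Hypothetical lerpPoints matching the call
// lerpPoints(obj.n, swapFrom, swapTo, faceBuffer) used above;
// each coordinate uses the (b - a) * n + a lerp shown in this file
function lerpPoints(n, from, to, out) {
  for (let i = 0; i < out.length; i++) {
    out[i].x = (to[i].x - from[i].x) * n + from[i].x
    out[i].y = (to[i].y - from[i].y) * n + from[i].y
    out[i].z = (to[i].z - from[i].z) * n + from[i].z
  }
}
```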
\ No newline at end of file diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html index 3f7dce60..a6226720 100644 --- a/site/public/datasets/lfw/index.html +++ b/site/public/datasets/lfw/index.html @@ -28,7 +28,7 @@ <div class="content"> <section><h1>Labeled Faces in the Wild</h1> -</section><section><div class='meta'><div><div class='gray'>Created</div><div>2007</div></div><div><div class='gray'>Images</div><div>13,233</div></div><div><div class='gray'>People</div><div>5,749</div></div><div><div class='gray'>Created From</div><div>Yahoo News images</div></div><div><div class='gray'>Search available</div><div>Searchable</div></div></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "face_search"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "name_search"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file assets/lfw_names_gender_kg_min.csv", "fields": ["Name, Images, Gender, Description"]}'></div></section><section class='fullwidth'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_feature.jpg' alt='Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.'><div class='caption'>Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.</div></div></section><section><h3>Intro</h3> +</section><section><div class='meta'><div><div class='gray'>Created</div><div>2007</div></div><div><div class='gray'>Images</div><div>13,233</div></div><div><div class='gray'>People</div><div>5,749</div></div><div><div class='gray'>Created From</div><div>Yahoo News images</div></div><div><div class='gray'>Search available</div><div>Searchable</div></div></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "face_search"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "name_search"}'></div></section><section class='fullwidth'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_feature.jpg' alt='Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.'><div class='caption'>Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.</div></div></section><section><h3>Intro</h3> <p>Labeled Faces in The Wild (LFW) is among the most widely used facial recognition training datasets in the world and is the first of its kind to be created entirely from images posted online. The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. Use the tools below to check if you were included in this dataset or scroll down to read the analysis.</p> <p>Three paragraphs describing the LFW dataset in a format that can be easily replicated for the other datasets. Nothing too custom. 
An analysis of the initial research papers with context relative to all the other dataset papers.</p> </section><section class='fullwidth'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_montage_everyone_nocrop_1920.jpg' alt=' From George W. Bush to Jamie Lee Curtis: all 5,749 people in the LFW Dataset sorted from most to least images collected.'><div class='caption'> From George W. Bush to Jamie Lee Curtis: all 5,749 people in the LFW Dataset sorted from most to least images collected.</div></div></section><section><h3>LFW by the Numbers</h3> @@ -52,170 +52,28 @@ <li>The faces were detected using the Viola-Jones haarcascade face detector [^lfw_website] [^lfw_survey]</li> <li>Is considered the "most popular benchmark for face recognition" [^lfw_baidu]</li> <li>Is "the most widely used evaluation set in the field of facial recognition" [^lfw_pingan]</li> -<li>Is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." [^lfw_pingan]</li> -</ul> -<p>need citations</p> -<ul> -<li>All images were copied from Yahoo News between 2002 - 2004 [^lfw_original_paper]</li> -<li>SenseTime, who has relied on LFW for benchmarking their facial recognition performance, is the leading provider of surveillance to the Chinese Government (need citation)</li> +<li><p>Is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." [^lfw_pingan]</p> +</li> +<li><p>All images were copied from Yahoo News between 2002 - 2004 [^lfw_original_paper]</p> +</li> +<li>SenseTime, who has relied on LFW for benchmarking their facial recognition performance, is the leading provider of surveillance to the Chinese Government</li> </ul> </section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_montage_top1_640.jpg' alt=' former President George W. Bush'><div class='caption'> former President George W. Bush</div></div> -<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_montage_top2_4_640.jpg' alt=' Colin Powel (236), Tony Blair (144), and Donald Rumsfeld (121)'><div class='caption'> Colin Powel (236), Tony Blair (144), and Donald Rumsfeld (121)</div></div></section><section><h3>People and Companies using the LFW Dataset</h3> +<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_montage_top2_4_640.jpg' alt=' Colin Powell (236), Tony Blair (144), and Donald Rumsfeld (121)'><div class='caption'> Colin Powell (236), Tony Blair (144), and Donald Rumsfeld (121)</div></div></section><section><h3>People and Companies using the LFW Dataset</h3> <p>This section describes who is using the dataset and for what purposes. It should include specific examples of people or companies with citations and screenshots. This section is followed up by the graph, the map, and then the supplementary material.</p> <p>The LFW dataset is used by numerous companies for <a href="about/glossary#benchmarking">benchmarking</a> algorithms and in some cases <a href="about/glossary#training">training</a>. 
According to the benchmarking results page [^lfw_results] provided by the authors, over 2 dozen companies have contributed their benchmark results.</p> <p>According to BiometricUpdate.com [^lfw_pingan], LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."</p> <p>According to researchers at the Baidu Research – Institute of Deep Learning "LFW has been the most popular evaluation benchmark for face recognition, and played a very important role in facilitating the face recognition society to improve algorithm. [^lfw_baidu]."</p> <p>In addition to commercial use as an evaluation tool, all of the faces in the LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.</p> -</section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file assets/lfw_commercial_use.csv", "fields": ["name_display, company_url, example_url, country, description"]}'></div></section><section><table> -<thead><tr> -<th style="text-align:left">Company</th> -<th style="text-align:left">Country</th> -<th style="text-align:left">Industries</th> -</tr> -</thead> -<tbody> -<tr> -<td style="text-align:left"><a href="http://www.aratek.co">Aratek</a></td> -<td style="text-align:left">China</td> -<td style="text-align:left">Biometric sensors for telecom, civil identification, finance, education, POS, and transportation</td> -</tr> -<tr> -<td style="text-align:left"><a href="http://www.aratek.co">Aratek</a></td> -<td style="text-align:left">China</td> -<td style="text-align:left">Biometric sensors for telecom, civil identification, finance, education, POS, and transportation</td> -</tr> -<tr> -<td style="text-align:left"><a href="http://www.aratek.co">Aratek</a></td> -<td style="text-align:left">China</td> -<td style="text-align:left">Biometric sensors for telecom, civil identification, finance, education, POS, and transportation</td> -</tr> -</tbody> -</table> -<p>Add 2-4 screenshots of companies mentioning LFW here</p> </section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_screenshot_01.jpg' alt=' "PING AN Tech facial recognition receives high score in latest LFW test results"'><div class='caption'> "PING AN Tech facial recognition receives high score in latest LFW test results"</div></div> <div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_screenshot_02.jpg' alt=' "Face Recognition Performance in LFW benchmark"'><div class='caption'> "Face Recognition Performance in LFW benchmark"</div></div> <div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_screenshot_03.jpg' alt=' "The 1st place in face verification challenge, LFW"'><div class='caption'> "The 1st place in face verification challenge, LFW"</div></div></section><section><p>In benchmarking, companies use a dataset to evaluate their algorithms, which are typically trained on other data. After training, researchers will use LFW as a benchmark to compare results with other algorithms.</p> <p>For example, Baidu (est. net worth $13B) uses LFW to report results for their "Targeting Ultimate Accuracy: Face Recognition via Deep Embedding". 
According to the three Baidu researchers who produced the paper:</p> <h3>Citations</h3> -<p>Overall, LFW has at least 456 citations from 123 countries. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos.</p> -<p>Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos.</p> -</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/temp_graph.jpg' alt='Distribution of citations per year per country for the top 5 countries with citations for the LFW Dataset'><div class='caption'>Distribution of citations per year per country for the top 5 countries with citations for the LFW Dataset</div></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "map"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "citations"}'></div></section><section><h3>Conclusion</h3> +<p>Overall, LFW has at least 116 citations from 11 countries.</p> +</section><section class='applet_container'><div class='applet' data-payload='{"command": "map"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "citations"}'></div></section><section><h3>Conclusion</h3> <p>The LFW face recognition training and evaluation dataset is a historically important face dataset as it was the first popular dataset to be created entirely from Internet images, paving the way for a global trend towards downloading anyone’s face from the Internet and adding it to a dataset. As will be evident with other datasets, LFW’s approach has now become the norm.</p> <p>For all the 5,000 people in this datasets, their face is forever a part of facial recognition history. It would be impossible to remove anyone from the dataset because it is so ubiquitous. For their rest of the lives and forever after, these 5,000 people will continue to be used for training facial recognition surveillance.</p> -<h2>Right to Removal</h2> -<p>If you are affected by disclosure of your identity in this dataset please do contact the authors. Many have stated that they are willing to remove images upon request. The authors of the LFW dataset provide the following email for inquiries:</p> -<p>You can use the following message to request removal from the dataset:</p> -<p>To: Gary Huang <a href="mailto:mailto:gbhuang@cs.umass.edu">mailto:gbhuang@cs.umass.edu</a></p> -<p>Subject: Request for Removal from LFW Face Dataset</p> -<p>Dear [researcher name],</p> -<p>I am writing to you about the "Labeled Faces in The Wild Dataset". 
Recently I discovered that your dataset includes my identity and I no longer wish to be included in your dataset.</p> -<p>The dataset is being used thousands of companies around the world to improve facial recognition software including usage by governments for the purpose of law enforcement, national security, tracking consumers in retail environments, and tracking individuals through public spaces.</p> -<p>My name as it appears in your dataset is [your name]. Please remove all images from your dataset and inform your newsletter subscribers to likewise update their copies.</p> -<p>- [your name]</p> -<hr> -<h2>Supplementary Data</h2> -<p>Researchers, journ</p> -<table> -<thead><tr> -<th style="text-align:left">Title</th> -<th style="text-align:left">Organization</th> -<th style="text-align:left">Country</th> -<th style="text-align:left">Type</th> -</tr> -</thead> -<tbody> -<tr> -<td style="text-align:left">3D-aided face recognition from videos</td> -<td style="text-align:left">University of Lyon</td> -<td style="text-align:left">France</td> -<td style="text-align:left">edu</td> -</tr> -<tr> -<td style="text-align:left">A Community Detection Approach to Cleaning Extremely Large Face Database</td> -<td style="text-align:left">National University of Defense Technology, China</td> -<td style="text-align:left">China</td> -<td style="text-align:left">edu</td> -</tr> -<tr> -<td style="text-align:left">3D-aided face recognition from videos</td> -<td style="text-align:left">University of Lyon</td> -<td style="text-align:left">France</td> -<td style="text-align:left">edu</td> -</tr> -<tr> -<td style="text-align:left">3D-aided face recognition from videos</td> -<td style="text-align:left">University of Lyon</td> -<td style="text-align:left">France</td> -<td style="text-align:left">edu</td> -</tr> -<tr> -<td style="text-align:left">3D-aided face recognition from videos</td> -<td style="text-align:left">University of Lyon</td> -<td style="text-align:left">France</td> -<td style="text-align:left">edu</td> -</tr> -<tr> -<td style="text-align:left">3D-aided face recognition from videos</td> -<td style="text-align:left">University of Lyon</td> -<td style="text-align:left">France</td> -<td style="text-align:left">edu</td> -</tr> -<tr> -<td style="text-align:left">3D-aided face recognition from videos</td> -<td style="text-align:left">University of Lyon</td> -<td style="text-align:left">France</td> -<td style="text-align:left">edu</td> -</tr> -<tr> -<td style="text-align:left">3D-aided face recognition from videos</td> -<td style="text-align:left">University of Lyon</td> -<td style="text-align:left">France</td> -<td style="text-align:left">edu</td> -</tr> -<tr> -<td style="text-align:left">3D-aided face recognition from videos</td> -<td style="text-align:left">University of Lyon</td> -<td style="text-align:left">France</td> -<td style="text-align:left">edu</td> -</tr> -<tr> -<td style="text-align:left">3D-aided face recognition from videos</td> -<td style="text-align:left">University of Lyon</td> -<td style="text-align:left">France</td> -<td style="text-align:left">edu</td> -</tr> -<tr> -<td style="text-align:left">3D-aided face recognition from videos</td> -<td style="text-align:left">University of Lyon</td> -<td style="text-align:left">France</td> -<td style="text-align:left">edu</td> -</tr> -<tr> -<td style="text-align:left">3D-aided face recognition from videos</td> -<td style="text-align:left">University of Lyon</td> -<td style="text-align:left">France</td> -<td style="text-align:left">edu</td> -</tr> 
-<tr> -<td style="text-align:left">3D-aided face recognition from videos</td> -<td style="text-align:left">University of Lyon</td> -<td style="text-align:left">France</td> -<td style="text-align:left">edu</td> -</tr> -<tr> -<td style="text-align:left">3D-aided face recognition from videos</td> -<td style="text-align:left">University of Lyon</td> -<td style="text-align:left">France</td> -<td style="text-align:left">edu</td> -</tr> -<tr> -<td style="text-align:left">3D-aided face recognition from videos</td> -<td style="text-align:left">University of Lyon</td> -<td style="text-align:left">France</td> -<td style="text-align:left">edu</td> -</tr> -</tbody> -</table> <h2>Code</h2> </section><section><pre><code class="lang-python">#!/usr/bin/python diff --git a/site/public/index.html b/site/public/index.html index b322b093..d2986084 100644 --- a/site/public/index.html +++ b/site/public/index.html @@ -28,28 +28,30 @@ <div class="content"> <div class='hero'> - <div id="face_container"> - <div class='currentFace'></div> - </div> - <div class='intro'> - <div class='headline'> - MegaPixels is an art project that explores the dark side of face recognition datasets and the future of computer vision. + <div class='inner'> + <div id="face_container"> + <div class='currentFace'></div> </div> + <div class='intro'> + <div class='headline'> + MegaPixels is an art project that explores the dark side of face recognition datasets and the future of computer vision. + </div> - <div class='buttons'> - <a href="/datasets/"><button class='important'>Find Your Face</button></a> - <a href="/analyze/"><button class='normal'>Analyze Your Face</button></a> - </div> + <div class='buttons'> + <a href="/datasets/lfw/"><button class='important'>Find Your Face</button></a> + <a href="/analyze/"><button class='normal'>Analyze Your Face</button></a> + </div> - <div class='under'> - Made by Adam Harvey in collaboration with Jules Laplace, and in partnership with Mozilla.<br/> - <a href='/about/'>Read more about MegaPixels</a> + <div class='under'> + Made by Adam Harvey in collaboration with Jules Laplace, and in partnership with Mozilla.<br/> + <a href='/about/'>Read more about MegaPixels</a> + </div> </div> </div> </div> - <section class='wide'> - <h2>Facial Recognition Datasets</h2> + <section class='wide dataset-intro'> + <h2>Face Recognition Datasets</h2> <div class='right-sidebar'> <h4>SUMMARY</h4> <div class='meta'> @@ -62,15 +64,24 @@ </div> </div> - <p>Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p> - <p>Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p> - <p>Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p> + <p> + MegaPixels is an online art project that explores the history of face recognition from the perspective of datasets. MegaPixels aims to unravel the meanings behind the data and expose the darker corners of the biometric industry that have contributed to its growth. 
+ </p> + <p> + Through a mix of case studies, visualizations, and interactive tools, MegaPixels will use face recognition datasets to tell the history of modern biometrics. Many people have contributed to the development of face recognition technology, both wittingly and unwittingly. Not only scientists, but also celebrities and regular internet users have played a part. + </p> + <p> + Face recognition is a mess of contradictions. It works, yet it doesn't actually work. It's cheap and accessible, but also expensive and out of control. Face recognition research has achieved headline-grabbing superhuman accuracies over 99.9%, yet in practice it's also dangerously inaccurate. + </p> + <p> + During a trial installation at Südkreuz station in Berlin in 2018, 20% of the matches were wrong, an error rate so high that it should have no connection to law enforcement or justice. And in London, the Metropolitan Police had been using face recognition software whose matches mistakenly identified people as criminals an alarming 98% of the time, which is perhaps a crime in itself. + </p> </section> - <section class='wide'> + <section class='wide dataset-intro'> <h2>Dataset Portraits</h2> <p> - We have prepared detailed studies of some of the more noteworthy datasets. + We have prepared detailed case studies of some of the more noteworthy datasets, including tools to help you learn what is contained in these datasets, and even whether your own face has been used to train these algorithms. </p> <div class="dataset-list"> @@ -108,7 +119,10 @@ </footer> </body> -<script src="/assets/js/vendor/three.min.js"></script> +<script src="https://cdnjs.cloudflare.com/ajax/libs/babel-polyfill/7.0.0/polyfill.min.js"></script> +<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/97/three.min.js"></script> +<script src="https://unpkg.com/three.texttexture@18.10.24"></script> +<script src="/assets/demo/cloud/THREE.TextSprite.js"></script> <script src="/assets/js/vendor/three.meshline.js"></script> <script src="/assets/js/vendor/oktween.js"></script> <script src="/assets/js/app/face.js"></script> diff --git a/site/templates/home.html b/site/templates/home.html index fc821731..9756e21f 100644 --- a/site/templates/home.html +++ b/site/templates/home.html @@ -2,28 +2,30 @@ {% block content %} <div class='hero'> - <div id="face_container"> - <div class='currentFace'></div> - </div> - <div class='intro'> - <div class='headline'> - MegaPixels is an art project that explores the dark side of face recognition datasets and the future of computer vision. + <div class='inner'> + <div id="face_container"> + <div class='currentFace'></div> </div> + <div class='intro'> + <div class='headline'> + MegaPixels is an art project that explores the dark side of face recognition datasets and the future of computer vision. 
+ </div> - <div class='buttons'> - <a href="/datasets/"><button class='important'>Find Your Face</button></a> - <a href="/analyze/"><button class='normal'>Analyze Your Face</button></a> - </div> + <div class='buttons'> + <a href="/datasets/lfw/"><button class='important'>Find Your Face</button></a> + <a href="/analyze/"><button class='normal'>Analyze Your Face</button></a> + </div> - <div class='under'> - Made by Adam Harvey in collaboration with Jules Laplace, and in partnership with Mozilla.<br/> - <a href='/about/'>Read more about MegaPixels</a> + <div class='under'> + Made by Adam Harvey in collaboration with Jules Laplace, and in partnership with Mozilla.<br/> + <a href='/about/'>Read more about MegaPixels</a> + </div> </div> </div> </div> - <section class='wide'> - <h2>Facial Recognition Datasets</h2> + <section class='wide dataset-intro'> + <h2>Face Recognition Datasets</h2> <div class='right-sidebar'> <h4>SUMMARY</h4> <div class='meta'> @@ -36,15 +38,24 @@ </div> </div> - <p>Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p> - <p>Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p> - <p>Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p> + <p> + MegaPixels is an online art project that explores the history of face recognition from the perspective of datasets. MegaPixels aims to unravel the meanings behind the data and expose the darker corners of the biometric industry that have contributed to its growth. + </p> + <p> + Through a mix of case studies, visualizations, and interactive tools, MegaPixels will use face recognition datasets to tell the history of modern biometrics. Many people have contributed to the development of face recognition technology, both wittingly and unwittingly. Not only scientists, but also celebrities and regular internet users have played a part. + </p> + <p> + Face recognition is a mess of contradictions. It works, yet it doesn't actually work. It's cheap and accessible, but also expensive and out of control. Face recognition research has achieved headline-grabbing superhuman accuracies over 99.9%, yet in practice it's also dangerously inaccurate. + </p> + <p> + During a trial installation at Südkreuz station in Berlin in 2018, 20% of the matches were wrong, an error rate so high that it should have no connection to law enforcement or justice. And in London, the Metropolitan Police had been using face recognition software whose matches mistakenly identified people as criminals an alarming 98% of the time, which is perhaps a crime in itself. + </p> </section> - <section class='wide'> + <section class='wide dataset-intro'> <h2>Dataset Portraits</h2> <p> - We have prepared detailed studies of some of the more noteworthy datasets. + We have prepared detailed case studies of some of the more noteworthy datasets, including tools to help you learn what is contained in these datasets, and even whether your own face has been used to train these algorithms. 
</p> <div class="dataset-list"> @@ -61,7 +72,10 @@ {% endblock %} {% block scripts %} -<script src="/assets/js/vendor/three.min.js"></script> +<script src="https://cdnjs.cloudflare.com/ajax/libs/babel-polyfill/7.0.0/polyfill.min.js"></script> +<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/97/three.min.js"></script> +<script src="https://unpkg.com/three.texttexture@18.10.24"></script> +<script src="/assets/demo/cloud/THREE.TextSprite.js"></script> <script src="/assets/js/vendor/three.meshline.js"></script> <script src="/assets/js/vendor/oktween.js"></script> <script src="/assets/js/app/face.js"></script>
