summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJules Laplace <julescarbon@gmail.com>2018-12-06 19:39:29 +0100
committerJules Laplace <julescarbon@gmail.com>2018-12-06 19:39:29 +0100
commit2d950c3fa3b8107f941a80f88127ab45e371d128 (patch)
tree239d6b0162b96153d5fc624f1f7ebe6218f51b3e
parent1f97c63136a24ae288f7c946f23b1c7519b2c6e0 (diff)
homepage css
-rw-r--r--builder/README.md3
-rw-r--r--builder/builder.py10
-rw-r--r--builder/parser.py4
-rw-r--r--site/assets/css/css.css93
-rw-r--r--site/assets/js/app/face.js213
-rw-r--r--site/assets/js/app/site.js3
-rw-r--r--site/public/about/credits/index.html1
-rw-r--r--site/public/about/disclaimer/index.html1
-rw-r--r--site/public/about/index.html1
-rw-r--r--site/public/about/press/index.html1
-rw-r--r--site/public/about/privacy/index.html1
-rw-r--r--site/public/about/style/index.html3
-rw-r--r--site/public/about/terms/index.html1
-rw-r--r--site/public/datasets/lfw/index.html160
-rw-r--r--site/public/datasets/lfw/what/index.html1
-rw-r--r--site/public/datasets/vgg_faces2/index.html1
-rw-r--r--site/public/index.html42
-rw-r--r--site/public/research/01_from_1_to_100_pixels/index.html1
-rw-r--r--site/public/research/index.html1
-rw-r--r--site/templates/home.html32
-rw-r--r--site/templates/layout.html1
21 files changed, 524 insertions, 50 deletions
diff --git a/builder/README.md b/builder/README.md
index 1a6d3a1e..57c024cb 100644
--- a/builder/README.md
+++ b/builder/README.md
@@ -19,3 +19,6 @@ authors: Adam Harvey, Berit Gilma, Matthew Stender
Static assets: `v1/site/about/assets/picture.jpg`
Dataset assets: `v1/datasets/lfw/assets/picture.jpg`
+
+## Markup
+
diff --git a/builder/builder.py b/builder/builder.py
index 0e404b88..620fc710 100644
--- a/builder/builder.py
+++ b/builder/builder.py
@@ -29,10 +29,12 @@ def build_page(fn, research_posts):
output_path = public_path + metadata['url']
output_fn = os.path.join(output_path, "index.html")
- is_research = False
+ skip_h1 = False
- if 'research/' in fn:
- is_research = True
+ if metadata['url'] == '/':
+ template = env.get_template("home.html")
+ elif 'research/' in fn:
+ skip_h1 = True
template = env.get_template("research.html")
else:
template = env.get_template("page.html")
@@ -47,7 +49,7 @@ def build_page(fn, research_posts):
if 'index.md' in fn:
s3.sync_directory(dirname, s3_dir, metadata)
- content = parser.parse_markdown(sections, s3_path, skip_h1=is_research)
+ content = parser.parse_markdown(sections, s3_path, skip_h1=skip_h1)
html = template.render(
metadata=metadata,
diff --git a/builder/parser.py b/builder/parser.py
index da3044a0..dd3643bf 100644
--- a/builder/parser.py
+++ b/builder/parser.py
@@ -46,15 +46,13 @@ def format_metadata(section):
def parse_markdown(sections, s3_path, skip_h1=False):
groups = []
current_group = []
- seen_metadata = False
for section in sections:
if skip_h1 and section.startswith('# '):
continue
- elif section.startswith('+ ') and not seen_metadata:
+ elif section.startswith('+ '):
groups.append(format_section(current_group, s3_path))
groups.append(format_metadata(section))
current_group = []
- seen_metadata = True
elif '![wide:' in section:
groups.append(format_section(current_group, s3_path))
groups.append(format_section([section], s3_path, type='wide'))
diff --git a/site/assets/css/css.css b/site/assets/css/css.css
index 843809a8..9ac35699 100644
--- a/site/assets/css/css.css
+++ b/site/assets/css/css.css
@@ -5,9 +5,11 @@ html, body {
width: 100%;
min-height: 100%;
font-family: 'Roboto', sans-serif;
- background: #191919;
color: #b8b8b8;
}
+html {
+ background: #191919;
+}
/* header */
@@ -119,12 +121,14 @@ h1 {
font-size: 24pt;
margin: 75px 0 10px;
padding: 0;
+ transition: color 0.2s cubic-bezier(0,0,1,1);
}
h2, h3 {
margin: 0 0 20px 0;
padding: 0;
font-size: 11pt;
font-weight: 500;
+ transition: color 0.2s cubic-bezier(0,0,1,1);
}
th, .gray, h2, h3 {
@@ -281,6 +285,9 @@ section.wide .image {
max-width: 620px;
margin: 10px auto 0 auto;
}
+
+/* blog index */
+
.research_index {
margin-top: 40px;
}
@@ -289,10 +296,88 @@ section.wide .image {
}
.research_index h1 {
margin-top: 20px;
+ text-decoration: underline;
+}
+.desktop .research_index section:hover h1 {
+ color: #fff;
+}
+.research_index section:hover h2 {
+ color: #ddd;
}
-/* blogpost index */
+/* home page */
-.blogposts div {
- margin-bottom: 5px;
+.hero {
+ position: relative;
+ width: 100%;
+ max-width: 1200px;
+ height: 50vw;
+ max-height: 70vh;
+ display: flex;
+ align-items: center;
+ margin: 0 auto;
+}
+#face_container {
+ pointer-events: none;
+ position: absolute;
+ width: 50vw;
+ height: 50vw;
+ max-height: 70vh;
+ top: 0;
+ right: 0;
+ z-index: -1;
+ text-align: center;
+}
+.currentFace {
+ position: absolute;
+ bottom: 50px;
+ width: 100%;
+ left: 0;
+ text-align: center;
+}
+.intro {
+ max-width: 640px;
+ padding: 75px 0 75px 10px;
+ z-index: 1;
+}
+.intro .headline {
+ font-family: 'Roboto Mono', monospace;
+ font-size: 16pt;
+}
+.intro .buttons {
+ margin: 40px 0;
+}
+.intro button {
+ font-family: 'Roboto', sans-serif;
+ padding: 8px 12px;
+ border-radius: 6px;
+ border: 1px solid transparent;
+ cursor: pointer;
+ font-size: 11pt;
+ margin-right: 10px;
+ transition: color 0.1s cubic-bezier(0,0,1,1), background-color 0.1s cubic-bezier(0,0,1,1);
+}
+.intro button.normal {
+ background: #191919;
+ border-color: #444;
+ color: #ddd;
+}
+.intro button.important {
+ background: #444;
+ border-color: #444;
+ color: #ddd;
+}
+.desktop .intro button:hover {
+ background: #666;
+ border-color: #666;
+ color: #fff;
+}
+.intro .under {
+ color: #888;
+}
+.intro .under a {
+ color: #bbb;
+}
+.desktop .intro .under a:hover {
+ color: #fff;
} \ No newline at end of file
diff --git a/site/assets/js/app/face.js b/site/assets/js/app/face.js
new file mode 100644
index 00000000..e8bcd313
--- /dev/null
+++ b/site/assets/js/app/face.js
@@ -0,0 +1,213 @@
+var face = (function(){
+ var container = document.querySelector("#face_container")
+ var camera, controls, scene, renderer
+ var mouse = new THREE.Vector2(0.5, 0.5)
+ var mouseTarget = new THREE.Vector2(0.5, 0.5)
+ var POINT_SCALE = 1.8
+ var FACE_POINT_COUNT = 68
+ var SWAP_TIME = 500
+ var cubes = [], meshes = []
+ var currentFace = document.querySelector('.currentFace')
+ var faceBuffer = (function () {
+ var a = new Array(FACE_POINT_COUNT)
+ for (let i = 0; i < FACE_POINT_COUNT; i++) {
+ a[i] = new THREE.Vector3()
+ }
+ return a
+ })()
+ var last_t = 0, start_t = 0
+ var colors = [
+ 0xff3333,
+ 0xff8833,
+ 0xffff33,
+ 0x338833,
+ 0x3388ff,
+ 0x3333ff,
+ 0x8833ff,
+ 0xff3388,
+ 0xffffff,
+ ]
+ var swapping = false, swap_count = 0, swapFrom, swapTo, face_names, faces
+ init()
+
+ function init() {
+ fetch("/assets/data/3dlm_0_10.json")
+ .then(req => req.json())
+ .then(data => {
+ face_names = Object.keys(data)
+ faces = face_names.map(name => recenter(data[name]))
+ setup()
+ build(faces[0])
+ updateFace(faces[0])
+ setCurrentFace(face_names[0])
+ swapTo = faces[0]
+ animate()
+ })
+ }
+ function setup() {
+ var w = window.innerWidth / 2
+ var h = Math.min(window.innerWidth / 2, window.innerHeight * 0.7)
+ camera = new THREE.PerspectiveCamera(70, w/h, 1, 10000)
+ camera.position.x = 0
+ camera.position.y = 0
+ camera.position.z = 250
+
+ scene = new THREE.Scene()
+ scene.background = new THREE.Color(0x191919)
+
+ renderer = new THREE.WebGLRenderer({ antialias: true })
+ renderer.setPixelRatio(window.devicePixelRatio)
+ renderer.setSize(w, h)
+ container.appendChild(renderer.domElement)
+ document.body.addEventListener('mousemove', onMouseMove)
+ // renderer.domElement.addEventListener('mousedown', swap)
+ setInterval(swap, 5000)
+ }
+ function build(points) {
+ var matrix = new THREE.Matrix4()
+ var quaternion = new THREE.Quaternion()
+
+ for (var i = 0; i < FACE_POINT_COUNT; i++) {
+ var p = points[i]
+ var geometry = new THREE.BoxBufferGeometry()
+ var position = new THREE.Vector3(p[0], p[1], p[2])
+ var rotation = new THREE.Euler()
+ var scale = new THREE.Vector3()
+ var color = new THREE.Color()
+ scale.x = scale.y = scale.z = POINT_SCALE
+ quaternion.setFromEuler(rotation, false)
+ matrix.compose(position, quaternion, scale)
+ geometry.applyMatrix(matrix)
+ material = new THREE.MeshBasicMaterial({ color: color.setHex(0xffffff) })
+ cube = new THREE.Mesh(geometry, material)
+ scene.add(cube)
+ cubes.push(cube)
+ }
+
+ meshes = getLineGeometry(points).map((geometry, i) => {
+ var color = new THREE.Color()
+ var material = new MeshLineMaterial({
+ color: color.setHex(colors[i % colors.length]),
+ })
+ var line = new MeshLine()
+ line.setGeometry(geometry, _ => 1.5)
+ var mesh = new THREE.Mesh(line.geometry, material)
+ mesh.geometry.dynamic = true
+ scene.add(mesh)
+ return [line, mesh]
+ })
+ }
+ function lerpPoints(n, A, B, C) {
+ for (let i = 0, len = A.length; i < len; i++) {
+ lerpPoint(n, A[i], B[i], C[i])
+ }
+ }
+ function lerpPoint(n, A, B, C) {
+ C.x = lerp(n, A.x, B.x)
+ C.y = lerp(n, A.y, B.y)
+ C.z = lerp(n, A.z, B.z)
+ }
+ function lerp(n, a, b) {
+ return (b-a) * n + a
+ }
+ function swap(){
+ if (swapping) return
+ start_t = last_t
+ swapping = true
+ swap_count = (swap_count + 1) % faces.length
+ swapFrom = swapTo
+ swapTo = faces[swap_count]
+ setCurrentFace(face_names[swap_count])
+ }
+ function setCurrentFace(name) {
+ name = name.replace('.png', '').split('_').filter(s => !s.match(/\d+/)).join(' ')
+ currentFace.innerHTML = name
+ }
+ function update_swap(t){
+ var n = (t - start_t) / SWAP_TIME
+ if (n > 1) {
+ swapping = false
+ n = 1
+ }
+ lerpPoints(n, swapFrom, swapTo, faceBuffer)
+ updateFace(faceBuffer)
+ }
+ function updateFace(points) {
+ updateCubeGeometry(points)
+ updateLineGeometry(points)
+ }
+ function updateCubeGeometry(points) {
+ cubes.forEach((cube, i) => {
+ const p = points[i]
+ cube.position.set(p.x, p.y, p.z)
+ })
+ }
+ function updateLineGeometry(points) {
+ getLineGeometry(points).map((geometry, i) => {
+ var [line, mesh] = meshes[i]
+ line.setGeometry(geometry, _ => 1.5)
+ mesh.geometry.vertices = line.geometry.vertices
+ mesh.geometry.verticesNeedUpdate = true
+ })
+ }
+ function getLineGeometry(points) {
+ return [
+ points.slice(0, 17),
+ points.slice(17, 22),
+ points.slice(22, 27),
+ points.slice(27, 31),
+ points.slice(31, 36),
+ points.slice(36, 42),
+ points.slice(42, 48),
+ points.slice(48)
+ ].map((a, i) => {
+ var geometry = new THREE.Geometry()
+ a.forEach(p => geometry.vertices.push(p))
+ if (i > 4) {
+ geometry.vertices.push(a[0])
+ }
+ return geometry
+ })
+ }
+ function getBounds(obj) {
+ return obj.reduce((a, p) => {
+ return [
+ Math.min(a[0], p[0]),
+ Math.max(a[1], p[0]),
+ Math.min(a[2], p[1]),
+ Math.max(a[3], p[1]),
+ Math.min(a[4], p[2]),
+ Math.max(a[5], p[2]),
+ ]
+ }, [Infinity, -Infinity, Infinity, -Infinity, Infinity, -Infinity])
+ }
+ function recenter(obj) {
+ const bounds = getBounds(obj)
+ const x_width = (bounds[1] - bounds[0]) / 2
+ const y_width = (bounds[3] - bounds[2]) / -3
+ const z_width = (bounds[5] - bounds[4]) / 2
+ return obj.map(p => {
+ p[0] = p[0] - bounds[0] - x_width
+ p[1] = -p[1] + bounds[1] + y_width
+ p[2] = p[2] - bounds[2] + z_width
+ return new THREE.Vector3(p[0], p[1], p[2])
+ })
+ }
+ //
+ function onMouseMove(e) {
+ mouse.x = e.clientX / window.innerWidth
+ mouse.y = e.clientY / window.innerHeight
+ }
+ function animate(t) {
+ requestAnimationFrame(animate)
+ if (swapping) update_swap(t)
+ renderer.render(scene, camera)
+ scene.rotation.y += 0.01 * Math.PI
+ mouseTarget.x += (mouse.x - mouseTarget.x) * 0.1
+ mouseTarget.y += (mouse.y - mouseTarget.y) * 0.1
+ scene.rotation.x = (mouseTarget.y - 0.5) * Math.PI / 2
+ // scene.rotation.y = (mouseTarget.x - 0.5) * Math.PI
+ scene.rotation.y += 0.01
+ last_t = t
+ }
+})()
diff --git a/site/assets/js/app/site.js b/site/assets/js/app/site.js
index 12bee3ec..eb6886c2 100644
--- a/site/assets/js/app/site.js
+++ b/site/assets/js/app/site.js
@@ -7,7 +7,8 @@ const isDesktop = !isMobile
const htmlClassList = document.body.parentNode.classList
htmlClassList.add(isDesktop ? 'desktop' : 'mobile')
-function toArray(A) { return Array.prototype.slice.apply(A) }
+function toArray(a) { return Array.prototype.slice.apply(a) }
+function choice(a) { return a[Math.floor(Math.random()*a.length)]}
var site = (function(){
var site = {}
diff --git a/site/public/about/credits/index.html b/site/public/about/credits/index.html
index f1a28b0e..65bc7ac4 100644
--- a/site/public/about/credits/index.html
+++ b/site/public/about/credits/index.html
@@ -52,5 +52,6 @@
</div>
</footer>
</body>
+
<script src="/assets/js/app/site.js"></script>
</html> \ No newline at end of file
diff --git a/site/public/about/disclaimer/index.html b/site/public/about/disclaimer/index.html
index 5df5d656..b0215bde 100644
--- a/site/public/about/disclaimer/index.html
+++ b/site/public/about/disclaimer/index.html
@@ -52,5 +52,6 @@
</div>
</footer>
</body>
+
<script src="/assets/js/app/site.js"></script>
</html> \ No newline at end of file
diff --git a/site/public/about/index.html b/site/public/about/index.html
index f1a28b0e..65bc7ac4 100644
--- a/site/public/about/index.html
+++ b/site/public/about/index.html
@@ -52,5 +52,6 @@
</div>
</footer>
</body>
+
<script src="/assets/js/app/site.js"></script>
</html> \ No newline at end of file
diff --git a/site/public/about/press/index.html b/site/public/about/press/index.html
index e5763036..09c89165 100644
--- a/site/public/about/press/index.html
+++ b/site/public/about/press/index.html
@@ -50,5 +50,6 @@
</div>
</footer>
</body>
+
<script src="/assets/js/app/site.js"></script>
</html> \ No newline at end of file
diff --git a/site/public/about/privacy/index.html b/site/public/about/privacy/index.html
index 7ad9564f..5675f072 100644
--- a/site/public/about/privacy/index.html
+++ b/site/public/about/privacy/index.html
@@ -129,5 +129,6 @@ You are advised to review this Privacy Policy periodically for any changes. Chan
</div>
</footer>
</body>
+
<script src="/assets/js/app/site.js"></script>
</html> \ No newline at end of file
diff --git a/site/public/about/style/index.html b/site/public/about/style/index.html
index eea861ac..f2c0d4b8 100644
--- a/site/public/about/style/index.html
+++ b/site/public/about/style/index.html
@@ -27,7 +27,7 @@
<div class="content">
<section><h1>Style Examples</h1>
-</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/test.jpg' alt='Alt text here'><div class='caption'>Alt text here</div></div></section><section><h1>Header 1</h1>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/test.jpg' alt='Alt text here'><div class='caption'>Alt text here</div></div></section><section><div class='meta'><div><div class='gray'>Date</div><div>17-Jan-2019</div></div><div><div class='gray'>Numbers</div><div>17</div></div><div><div class='gray'>Identities</div><div>12,139</div></div><div><div class='gray'>But also</div><div>This is a test of the stylesheet</div></div></div></section><section><h1>Header 1</h1>
<h2>Header 2</h2>
<h3>Header 3</h3>
<h4>Header 4</h4>
@@ -85,5 +85,6 @@ But let's throw in a &lt;b&gt;tag&lt;/b&gt;.
</div>
</footer>
</body>
+
<script src="/assets/js/app/site.js"></script>
</html> \ No newline at end of file
diff --git a/site/public/about/terms/index.html b/site/public/about/terms/index.html
index db8b9e57..078c339f 100644
--- a/site/public/about/terms/index.html
+++ b/site/public/about/terms/index.html
@@ -64,5 +64,6 @@
</div>
</footer>
</body>
+
<script src="/assets/js/app/site.js"></script>
</html> \ No newline at end of file
diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html
index 76549d25..39052b44 100644
--- a/site/public/datasets/lfw/index.html
+++ b/site/public/datasets/lfw/index.html
@@ -27,23 +27,22 @@
<div class="content">
<section><h1>Labeled Faces in The Wild</h1>
-</section><section><div class='meta'><div><div class='gray'>Created</div><div>2007</div></div><div><div class='gray'>Images</div><div>13,233</div></div><div><div class='gray'>People</div><div>5,749</div></div><div><div class='gray'>Created From</div><div>Yahoo News images</div></div><div><div class='gray'>Search available</div><div>Searchable</div></div></div></section><section><p>Labeled Faces in The Wild is amongst the most widely used facial recognition training datasets in the world and is the first dataset of its kind to be created entirely from Internet photos. It includes 13,233 images of 5,749 people downloaded from the Internet, otherwise referred to as “The Wild”.</p>
-</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_sample.jpg' alt='Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.'><div class='caption'>Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.</div></div></section><section><h2>INTRO</h2>
+</section><section><div class='meta'><div><div class='gray'>Created</div><div>2007</div></div><div><div class='gray'>Images</div><div>13,233</div></div><div><div class='gray'>People</div><div>5,749</div></div><div><div class='gray'>Created From</div><div>Yahoo News images</div></div><div><div class='gray'>Search available</div><div>Searchable</div></div></div></section><section><p>Labeled Faces in The Wild (LFW) is amongst the most widely used facial recognition training datasets in the world and is the first of its kind to be created entirely from images that were posted online. The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. Use the tools below to check if you were included in this dataset or scroll down to read the analysis.</p>
+<p>{INSERT IMAGE SEARCH MODULE}</p>
+<p>{INSERT TEXT SEARCH MODULE}</p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_feature.jpg' alt='Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.'><div class='caption'>Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.</div></div></section><section><h2>INTRO</h2>
<p>It began in 2002. Researchers at University of Massachusetts Amherst were developing algorithms for facial recognition and they needed more data. Between 2002-2004 they scraped Yahoo News for images of public figures. Two years later they cleaned up the dataset and repackaged it as Labeled Faces in the Wild (LFW).</p>
<p>Since then the LFW dataset has become one of the most widely used datasets used for evaluating face recognition algorithms. The associated research paper “Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments” has been cited 996 times reaching 45 different countries throughout the world.</p>
<p>The faces come from news stories and are mostly celebrities from the entertainment industry, politicians, and villains. It’s a sampling of current affairs and breaking news that has come to pass. The images, detached from their original context, now serve a new purpose: to train, evaluate, and improve facial recognition.</p>
<p>As the most widely used facial recognition dataset, it can be said that each individual in LFW has, in a small way, contributed to the current state of the art in facial recognition surveillance. John Cusack, Julianne Moore, Barry Bonds, Osama bin Laden, and even Moby are amongst these biometric pillars, exemplar faces providing the visual dimensions of a new computer vision future.</p>
-</section><section class='wide'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_a_to_c.jpg' alt='From Aaron Eckhart to Zydrunas Ilgauskas. A small sampling of the LFW dataset'><div class='caption'>From Aaron Eckhart to Zydrunas Ilgauskas. A small sampling of the LFW dataset</div></div></section><section><p>In addition to commercial use as an evaluation tool, alll of the faces in LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.</p>
-<h2>Usage</h2>
-<pre><code class="lang-python">#!/usr/bin/python
-from matplotlib import plt
-from sklearn.datasets import fetch_lfw_people
-lfw_people = fetch_lfw_people()
-lfw_person = lfw_people[0]
-plt.imshow(lfw_person)
-</code></pre>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_montage_1280.jpg' alt='The entire LFW dataset cropped to facial regions'><div class='caption'>The entire LFW dataset cropped to facial regions</div></div></section><section><p>In addition to commercial use as an evaluation tool, all of the faces in the LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.</p>
+<h2>Facts</h2>
+<p>The person with the most images is:
+The person with the least images is:</p>
<h2>Commercial Use</h2>
-<p>The LFW dataset is used by numerous companies for benchmarking algorithms and in some cases training. According to the benchmarking results page [^lfw_results] provided by the authors, over 2 dozen companies have contributed their benchmark results</p>
+<p>The LFW dataset is used by numerous companies for <a href="about/glossary#benchmarking">benchmarking</a> algorithms and in some cases <a href="about/glossary#training">training</a>. According to the benchmarking results page [^lfw_results] provided by the authors, over 2 dozen companies have contributed their benchmark results.</p>
+<p>According to BiometricUpdate.com [^lfw_pingan], LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."</p>
+<p>According to researchers at the Baidu Research – Institute of Deep Learning "LFW has been the most popular evaluation benchmark for face recognition, and played a very important role in facilitating the face recognition society to improve algorithm. [^lfw_baidu]."</p>
<pre><code>load file: lfw_commercial_use.csv
name_display,company_url,example_url,country,description
</code></pre>
@@ -73,11 +72,24 @@ name_display,company_url,example_url,country,description
</tbody>
</table>
<p>Add 2-4 screenshots of companies mentioning LFW here</p>
-</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_screenshot_01.png' alt='ReadSense'><div class='caption'>ReadSense</div></div></section><section><p>In benchmarking, companies use a dataset to evaluate their algorithms which are typically trained on other data. After training, researchers will use LFW as a benchmark to compare results with other algorithms.</p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_screenshot_01.jpg' alt=' "PING AN Tech facial recognition receives high score in latest LFW test results"'><div class='caption'> "PING AN Tech facial recognition receives high score in latest LFW test results"</div></div>
+<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_screenshot_02.jpg' alt=' "Face Recognition Performance in LFW benchmark"'><div class='caption'> "Face Recognition Performance in LFW benchmark"</div></div>
+<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_screenshot_03.jpg' alt=' "The 1st place in face verification challenge, LFW"'><div class='caption'> "The 1st place in face verification challenge, LFW"</div></div></section><section><p>In benchmarking, companies use a dataset to evaluate their algorithms which are typically trained on other data. After training, researchers will use LFW as a benchmark to compare results with other algorithms.</p>
<p>For example, Baidu (est. net worth $13B) uses LFW to report results for their "Targeting Ultimate Accuracy: Face Recognition via Deep Embedding". According to the three Baidu researchers who produced the paper:</p>
-<blockquote><p>LFW has been the most popular evaluation benchmark for face recognition, and played a very important role in facilitating the face recognition society to improve algorithm. <sup class="footnote-ref" id="fnref-baidu_lfw"><a href="#fn-baidu_lfw">1</a></sup>.</p>
-</blockquote>
<h2>Citations</h2>
+<p>Overall, LFW has at least 456 citations from 123 countries. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos.</p>
+<p>Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos.</p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/temp_graph.jpg' alt='Distribution of citations per year per country for the top 5 countries with citations for the LFW Dataset'><div class='caption'>Distribution of citations per year per country for the top 5 countries with citations for the LFW Dataset</div></div></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/temp_map.jpg' alt='Geographic distributions of citations for the LFW Dataset'><div class='caption'>Geographic distributions of citations for the LFW Dataset</div></div></section><section><h2>Conclusion</h2>
+<p>The LFW face recognition training and evaluation dataset is a historically important face dataset as it was the first popular dataset to be created entirely from Internet images, paving the way for a global trend towards downloading anyone’s face from the Internet and adding it to a dataset. As will be evident with other datasets, LFW’s approach has now become the norm.</p>
+<p>For all the 5,000 people in this dataset, their face is forever a part of facial recognition history. It would be impossible to remove anyone from the dataset because it is so ubiquitous. For the rest of their lives and forever after, these 5,000 people will continue to be used for training facial recognition surveillance.</p>
+<h2>Right to Removal</h2>
+<p>If you are affected by disclosure of your identity in this dataset, please do contact the authors; many state that they are willing to remove images upon request. The authors of the LFW can be reached from the emails posted in their paper:</p>
+<p>You can use the following message to request removal from the dataset:</p>
+<p>Dear [researcher name],</p>
+<p>I am writing to you about the "LFW Dataset". Recently I have discovered that your dataset includes my identity, and I no longer wish to be included in your dataset.</p>
+<p>MegaPixels is an educational art project developed for academic purposes. In no way does this project aim to vilify the researchers who produced the datasets. The aim of this project is to encourage discourse around ethics and consent in artificial intelligence by providing information about these datasets that is otherwise difficult to obtain or inaccessible to other researchers.</p>
+<h2>Supplementary Data</h2>
+<p>Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos.</p>
<table>
<thead><tr>
<th style="text-align:left">Title</th>
@@ -99,18 +111,119 @@ name_display,company_url,example_url,country,description
<td style="text-align:left">China</td>
<td style="text-align:left">edu</td>
</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
+<tr>
+<td style="text-align:left">3D-aided face recognition from videos</td>
+<td style="text-align:left">University of Lyon</td>
+<td style="text-align:left">France</td>
+<td style="text-align:left">edu</td>
+</tr>
</tbody>
</table>
-<h2>Conclusion</h2>
-<p>The LFW face recognition training and evaluation dataset is a historically important face dataset as it was the first popular dataset to be created entirely from Internet images, paving the way for a global trend towards downloading anyone’s face from the Internet and adding it to a dataset. As will be evident with other datasets, LFW’s approach has now become the norm.</p>
-<p>For all the 5,000 people in this datasets, their face is forever a part of facial recognition history. It would be impossible to remove anyone from the dataset because it is so ubiquitous. For their rest of the lives and forever after, these 5,000 people will continue to be used for training facial recognition surveillance.</p>
-<h2>Notes</h2>
-<p>According to BiometricUpdate.com<sup class="footnote-ref" id="fnref-biometric_update_lfw"><a href="#fn-biometric_update_lfw">2</a></sup>, LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."</p>
+<h2>Code</h2>
+<pre><code class="lang-python">#!/usr/bin/python
+
+import numpy as np
+from sklearn.datasets import fetch_lfw_people
+import imageio
+import imutils
+
+# download LFW dataset (first run takes a while)
+lfw_people = fetch_lfw_people(min_faces_per_person=1, resize=1, color=True, funneled=False)
+
+# introspect dataset
+n_samples, h, w, c = lfw_people.images.shape
+print(&#39;{:,} images at {}x{}&#39;.format(n_samples, w, h))
+cols, rows = (176, 76)
+n_ims = cols * rows
+
+# build montages
+im_scale = 0.5
+ims = lfw_people.images[:n_ims]
+montages = imutils.build_montages(ims, (int(w*im_scale), int(h*im_scale)), (cols, rows))
+montage = montages[0]
+
+# save full montage image
+imageio.imwrite(&#39;lfw_montage_full.png&#39;, montage)
+
+# make a smaller version
+montage_960 = imutils.resize(montage, width=960)
+imageio.imwrite(&#39;lfw_montage_960.jpg&#39;, montage_960)
+</code></pre>
<div class="footnotes">
<hr>
-<ol><li id="fn-baidu_lfw"><p>"Chinese tourist town uses face recognition as an entry pass". New Scientist. November 17, 2016. <a href="https://www.newscientist.com/article/2113176-chinese-tourist-town-uses-face-recognition-as-an-entry-pass/">https://www.newscientist.com/article/2113176-chinese-tourist-town-uses-face-recognition-as-an-entry-pass/</a><a href="#fnref-baidu_lfw" class="footnote">&#8617;</a></p></li>
-<li id="fn-biometric_update_lfw"><p>"PING AN Tech facial recognition receives high score in latest LFW test results". <a href="https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results">https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results</a><a href="#fnref-biometric_update_lfw" class="footnote">&#8617;</a></p></li>
-</ol>
+<ol></ol>
</div>
</section>
@@ -130,5 +243,6 @@ name_display,company_url,example_url,country,description
</div>
</footer>
</body>
+
<script src="/assets/js/app/site.js"></script>
</html> \ No newline at end of file
diff --git a/site/public/datasets/lfw/what/index.html b/site/public/datasets/lfw/what/index.html
index 52993a79..ceafb35a 100644
--- a/site/public/datasets/lfw/what/index.html
+++ b/site/public/datasets/lfw/what/index.html
@@ -137,5 +137,6 @@ name_display,company_url,example_url,country,description
</div>
</footer>
</body>
+
<script src="/assets/js/app/site.js"></script>
</html> \ No newline at end of file
diff --git a/site/public/datasets/vgg_faces2/index.html b/site/public/datasets/vgg_faces2/index.html
index 95b5f7d7..3f778f71 100644
--- a/site/public/datasets/vgg_faces2/index.html
+++ b/site/public/datasets/vgg_faces2/index.html
@@ -58,5 +58,6 @@
</div>
</footer>
</body>
+
<script src="/assets/js/app/site.js"></script>
</html> \ No newline at end of file
diff --git a/site/public/index.html b/site/public/index.html
index 3ce22936..51006b59 100644
--- a/site/public/index.html
+++ b/site/public/index.html
@@ -26,22 +26,31 @@
</header>
<div class="content">
- <section><p>MegaPixels is an art project that explores the dark side of face recognition training data and the future of computer vision</p>
-<p>Made by Adam Harvey in partnership with Mozilla.<br>
-Read more [about MegaPixels]</p>
-<p>[Explore Datasets] [Explore Algorithms]</p>
-<h2>Facial Recognition Datasets</h2>
+ <div class='hero'>
+ <div id="face_container">
+ <div class='currentFace'></div>
+ </div>
+ <div class='intro'>
+ <div class='headline'>
+ MegaPixels is an art project that explores the dark side of face recognition and the future of computer vision.
+ </div>
+
+ <div class='buttons'>
+ <a href="/datasets/"><button class='important'>Explore Datasets</button></a><a href="/analyze/"><button class='normal'>Analyze Your Face</button></a>
+ </div>
+
+ <div class='under'>
+ Made by Adam Harvey in partnership with Mozilla.<br/>
+ <a href='/about/'>Read more about MegaPixels</a>
+ </div>
+ </div>
+ </div>
+
+ <section><h2>Facial Recognition Datasets</h2>
<p>Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p>
<h3>Summary</h3>
-<ul>
-<li>275 datsets found</li>
-<li>Created between the years 1993-2018</li>
-<li>Smallest dataset: 20 images</li>
-<li>Largest dataset: 10,000,000 images</li>
-<li>Highest resolution faces: 450x500 (Unconstrained College Students)</li>
-<li>Lowest resolution faces: 16x20 pixels (QMUL SurvFace)</li>
-</ul>
-</section>
+</section><section><div class='meta'><div><div class='gray'>Found</div><div>275 datasets</div></div><div><div class='gray'>Created between</div><div>1993-2018</div></div><div><div class='gray'>Smallest dataset</div><div>20 images</div></div><div><div class='gray'>Largest dataset</div><div>10,000,000 images</div></div></div></section><section><div class='meta'><div><div class='gray'>Highest resolution faces</div><div>450x500 (Unconstrained College Students)</div></div><div><div class='gray'>Lowest resolution faces</div><div>16x20 pixels (QMUL SurvFace)</div></div></div></section><section></section>
+
</div>
<footer>
@@ -59,5 +68,10 @@ Read more [about MegaPixels]</p>
</div>
</footer>
</body>
+
+<script src="/assets/js/vendor/three.min.js"></script>
+<script src="/assets/js/vendor/three.meshline.js"></script>
+<script src="/assets/js/app/face.js"></script>
+
<script src="/assets/js/app/site.js"></script>
</html> \ No newline at end of file
diff --git a/site/public/research/01_from_1_to_100_pixels/index.html b/site/public/research/01_from_1_to_100_pixels/index.html
index 55e02c6c..b4c85d00 100644
--- a/site/public/research/01_from_1_to_100_pixels/index.html
+++ b/site/public/research/01_from_1_to_100_pixels/index.html
@@ -90,5 +90,6 @@
</div>
</footer>
</body>
+
<script src="/assets/js/app/site.js"></script>
</html> \ No newline at end of file
diff --git a/site/public/research/index.html b/site/public/research/index.html
index 1f61dadf..cf9546e1 100644
--- a/site/public/research/index.html
+++ b/site/public/research/index.html
@@ -46,5 +46,6 @@
</div>
</footer>
</body>
+
<script src="/assets/js/app/site.js"></script>
</html> \ No newline at end of file
diff --git a/site/templates/home.html b/site/templates/home.html
new file mode 100644
index 00000000..436c1ddf
--- /dev/null
+++ b/site/templates/home.html
@@ -0,0 +1,32 @@
+{% extends 'layout.html' %}
+
+{% block content %}
+ <div class='hero'>
+ <div id="face_container">
+ <div class='currentFace'></div>
+ </div>
+ <div class='intro'>
+ <div class='headline'>
+ MegaPixels is an art project that explores the dark side of face recognition and the future of computer vision.
+ </div>
+
+ <div class='buttons'>
+ <a href="/datasets/"><button class='important'>Explore Datasets</button></a><a href="/analyze/"><button class='normal'>Analyze Your Face</button></a>
+ </div>
+
+ <div class='under'>
+ Made by Adam Harvey in partnership with Mozilla.<br/>
+ <a href='/about/'>Read more about MegaPixels</a>
+ </div>
+ </div>
+ </div>
+
+ {{ content }}
+
+{% endblock %}
+
+{% block scripts %}
+<script src="/assets/js/vendor/three.min.js"></script>
+<script src="/assets/js/vendor/three.meshline.js"></script>
+<script src="/assets/js/app/face.js"></script>
+{% endblock %}
diff --git a/site/templates/layout.html b/site/templates/layout.html
index 7558163e..605f9788 100644
--- a/site/templates/layout.html
+++ b/site/templates/layout.html
@@ -42,5 +42,6 @@
</div>
</footer>
</body>
+{% block scripts %}{% endblock %}
<script src="/assets/js/app/site.js"></script>
</html> \ No newline at end of file