-rw-r--r--  client/util/index.js                      |  4
-rw-r--r--  megapixels/app/site/parser.py             | 17
-rw-r--r--  site/assets/css/css.css                   | 66
-rw-r--r--  site/content/pages/datasets/lfw/index.md  |  2
-rw-r--r--  site/public/datasets/lfw/index.html       | 18
5 files changed, 77 insertions(+), 30 deletions(-)
diff --git a/client/util/index.js b/client/util/index.js
index d0db0d98..0792e24e 100644
--- a/client/util/index.js
+++ b/client/util/index.js
@@ -5,12 +5,16 @@ export const isiPad = !!(navigator.userAgent.match(/iPad/i))
export const isAndroid = !!(navigator.userAgent.match(/Android/i))
export const isMobile = isiPhone || isiPad || isAndroid
export const isDesktop = !isMobile
+export const isFirefox = typeof InstallTrigger !== 'undefined'
export const toArray = a => Array.prototype.slice.apply(a)
export const choice = a => a[Math.floor(Math.random() * a.length)]
const htmlClassList = document.body.parentNode.classList
htmlClassList.add(isDesktop ? 'desktop' : 'mobile')
+if (isFirefox) {
+ htmlClassList.add('firefox')
+}
/* Default image dimensions */
diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py
index ef83b655..9e904e00 100644
--- a/megapixels/app/site/parser.py
+++ b/megapixels/app/site/parser.py
@@ -10,6 +10,8 @@ import app.site.s3 as s3
renderer = mistune.Renderer(escape=False)
markdown = mistune.Markdown(renderer=renderer)
+footnote_count = 0
+
def parse_markdown(metadata, sections, s3_path, skip_h1=False):
"""
parse page into sections, preprocess the markdown to handle our modifications
@@ -94,7 +96,18 @@ def parse_markdown(metadata, sections, s3_path, skip_h1=False):
if footnote_lookup:
for key, index in footnote_lookup.items():
- content = content.replace(key, '<a href="#{}" class="footnote" title="Footnote {}">{}</a>'.format(key, index, index))
+ global footnote_count
+ footnote_count = 0
+ letters = "abcdefghijklmnopqrstuvwxyz"
+ footnote_backlinks = []
+ def footnote_tag(match):
+ global footnote_count
+ footnote_count += 1
+ footnote_backlinks.append('<a href="#{}_{}">{}</a>'.format(key, footnote_count, letters[footnote_count-1]))
+ return '<a class="footnote_shim" name="{}_{}"> </a><a href="#{}" class="footnote" title="Footnote {}">{}</a>'.format(key, footnote_count, key, index, index)
+ key_regex = re.compile(key.replace('[', '\\[').replace('^', '\\^').replace(']', '\\]'))
+ content = key_regex.sub(footnote_tag, content)
+ footnote_txt = footnote_txt.replace("{}_BACKLINKS".format(index), "".join(footnote_backlinks))
content += footnote_txt
return content
@@ -197,7 +210,7 @@ def format_footnotes(footnotes, s3_path):
continue
key, note = footnote.split(': ', 1)
footnote_index_lookup[key] = index
- footnote_list.append('<a name="{}" class="footnote_anchor">^</a>'.format(key) + markdown(note))
+ footnote_list.append('<a name="{}" class="footnote_shim"></a><span class="backlinks">{}_BACKLINKS</span>'.format(key, index) + markdown(note))
index += 1
footnote_txt = '<section><ul class="footnotes"><li>' + '</li><li>'.join(footnote_list) + '</li></ul></section>'
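
The footnote changes above are easier to read outside the diff: every occurrence of a key like [^lfw_www] is replaced with a hidden shim anchor plus the visible numbered link, while lettered backlinks (a, b, c, ...) are accumulated per key and later spliced into the footnote list in place of the old caret anchor. Below is a minimal, self-contained sketch of that technique; the function name link_footnotes and the sample input are illustrative, not from this commit, and re.escape stands in for the manual escaping of [, ^, and ].

    import re

    def link_footnotes(content, footnote_lookup):
        # footnote_lookup maps a key like "[^lfw_www]" to its footnote number.
        letters = "abcdefghijklmnopqrstuvwxyz"
        all_backlinks = {}
        for key, index in footnote_lookup.items():
            count = 0
            backlinks = []

            def footnote_tag(match, key=key, index=index):
                nonlocal count
                count += 1
                # Collect a lettered backlink (a, b, c, ...) pointing to this
                # citation site; it gets spliced into the footnote list later.
                backlinks.append('<a href="#{}_{}">{}</a>'.format(
                    key, count, letters[count - 1]))
                # Hidden shim anchor marks the citation site; the visible link
                # jumps down to the footnote entry.
                return ('<a class="footnote_shim" name="{}_{}"> </a>'
                        '<a href="#{}" class="footnote" title="Footnote {}">{}</a>'
                        ).format(key, count, key, index, index)

            # re.escape covers the [, ^, ] metacharacters escaped by hand above.
            content = re.sub(re.escape(key), footnote_tag, content)
            all_backlinks[key] = "".join(backlinks)
        return content, all_backlinks

    html, links = link_footnotes(
        "LFW is widely used[^lfw_www]. See also[^lfw_www].",
        {"[^lfw_www]": 1})
    # links["[^lfw_www]"] == '<a href="#[^lfw_www]_1">a</a><a href="#[^lfw_www]_2">b</a>'

Using re.sub with a callback keeps the per-occurrence counter local to the loop; the committed code reaches the same result with a module-level global counter because its callback is nested inside parse_markdown.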
diff --git a/site/assets/css/css.css b/site/assets/css/css.css
index 0afa3725..4b42657b 100644
--- a/site/assets/css/css.css
+++ b/site/assets/css/css.css
@@ -16,7 +16,8 @@ html {
opacity: 0;
transition: opacity 0.2s cubic-bezier(0,1,1,1);
}
-html.desktop .content, html.mobile .content {
+html.desktop .content,
+html.mobile .content {
opacity: 1;
}
@@ -28,7 +29,7 @@ header {
left: 0;
width: 100%;
height: 70px;
- z-index: 2;
+ z-index: 9999;
background: #1e1e1e;
display: flex;
flex-direction: row;
@@ -53,8 +54,10 @@ header .logo {
height: 30px;
}
header .site_name {
+ font-family: 'Roboto', sans-serif;
font-weight: bold;
color: #fff;
+ font-size: 14px;
}
header .sub {
margin-left: 4px;
@@ -148,7 +151,7 @@ h3 {
margin: 0 0 20px 0;
padding: 0;
font-size: 14pt;
- font-weight: 600;
+ font-weight: 500;
transition: color 0.2s cubic-bezier(0,0,1,1);
}
h4 {
@@ -170,6 +173,8 @@ h4 {
margin: 0;
padding: 0 0 10px 0;
font-family: 'Roboto Mono';
+ font-weight: 400;
+ font-size: 11px;
text-transform: uppercase;
letter-spacing: 2px;
}
@@ -210,13 +215,17 @@ section {
p {
margin: 0 0 20px 0;
line-height: 2;
+ font-size: 15px;
+ font-weight: 400;
}
.content a {
- color: #ff0;
+ color: #fff;
+ text-decoration: none;
+ border-bottom: 1px dashed;
transition: color 0.2s cubic-bezier(0,0,1,1);
}
-.content a:hover {
- color: #fff;
+.desktop .content a:hover {
+ color: #ff8;
}
/* top of post metadata */
@@ -368,7 +377,7 @@ section.fullwidth .image {
.caption {
text-align: left;
font-size: 9pt;
- color: #bbb;
+ color: #999;
max-width: 960px;
margin: 10px auto 0 auto;
font-family: 'Roboto';
@@ -538,17 +547,22 @@ section.intro_section {
font-size: 38px;
line-height: 60px;
margin-bottom: 30px;
- color: #fff;
+ color: #ddd;
+ font-weight: 300;
}
.intro_section .hero_subdesc {
font-size: 18px;
line-height: 36px;
max-width: 640px;
+ font-weight: 300;
color: #ddd;
}
-.intro_section span {
- box-shadow: -10px -10px #000, 10px -10px #000, 10px 10px #000, -10px 10px #000;
- background: #000;
+.intro_section div > span {
+ box-shadow: -10px -10px #1e1e1e, 10px -10px #1e1e1e, 10px 10px #1e1e1e, -10px 10px #1e1e1e;
+ background: #1e1e1e;
+}
+.firefox .intro_section div > span {
+ box-decoration-break: clone;
}
/* footnotes */
@@ -559,22 +573,38 @@ a.footnote {
display: inline-block;
bottom: 10px;
text-decoration: none;
- color: #ff0;
+ color: #ff8;
+ border: 0;
left: 2px;
+ transition-duration: 0s;
+}
+a.footnote_shim {
+ display: inline-block;
+ width: 1px; height: 1px;
+ overflow: hidden;
+ position: relative;
+ top: -90px;
+ visibility: hidden;
}
.right-sidebar a.footnote {
bottom: 8px;
}
.desktop a.footnote:hover {
- background-color: #ff0;
+ background-color: #ff8;
color: #000;
}
-a.footnote_anchor {
- font-weight: bold;
- color: #ff0;
+.backlinks {
margin-right: 10px;
- text-decoration: underline;
- cursor: pointer;
+}
+.content .backlinks a {
+ color: #ff8;
+ font-size: 10px;
+ text-decoration: none;
+ border: 0;
+ font-weight: bold;
+ position: relative;
+ bottom: 5px;
+ margin-right: 2px;
}
ul.footnotes {
list-style-type: decimal;
diff --git a/site/content/pages/datasets/lfw/index.md b/site/content/pages/datasets/lfw/index.md
index 1995e1f9..972fafe2 100644
--- a/site/content/pages/datasets/lfw/index.md
+++ b/site/content/pages/datasets/lfw/index.md
@@ -5,7 +5,7 @@ title: Labeled Faces in The Wild
desc: Labeled Faces in The Wild (LFW) is a database of face photographs designed for studying the problem of unconstrained face recognition.
subdesc: It includes 13,456 images of 4,432 people copied from the Internet during 2002-2004.
image: assets/lfw_feature.jpg
-caption: Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.
+caption: A few of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.
slug: lfw
published: 2019-2-23
updated: 2019-2-23
diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html
index 54b6aa22..08ec8ee3 100644
--- a/site/public/datasets/lfw/index.html
+++ b/site/public/datasets/lfw/index.html
@@ -28,10 +28,10 @@
<div class="content">
<section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_feature.jpg)'><div class='inner'><div class='hero_desc'><span><span style='color: #ff0000'>Labeled Faces in The Wild</span> (LFW) is a database of face photographs designed for studying the problem of unconstrained face recognition.</span></div><div class='hero_subdesc'><span>It includes 13,456 images of 4,432 people copied from the Internet during 2002-2004.
-</span></div></div></section><section><div class='image'><div class='caption'>Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.</div></div></section><section><div class='right-sidebar'><h3>Statistics</h3>
+</span></div></div></section><section><div class='image'><div class='caption'>A few of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.</div></div></section><section><div class='right-sidebar'><h3>Statistics</h3>
<div class='meta'><div><div class='gray'>Years</div><div>2002-2004</div></div><div><div class='gray'>Images</div><div>13,233</div></div><div><div class='gray'>Identities</div><div>5,749</div></div><div><div class='gray'>Origin</div><div>Yahoo News Images</div></div><div><div class='gray'>Funding</div><div>(Possibly, partially CIA)</div></div></div><h3>INSIGHTS</h3>
<ul>
-<li>There are about 3 men for every 1 woman (4,277 men and 1,472 women) in the LFW dataset<a href="#[^lfw_www]" class="footnote" title="Footnote 1">1</a></li>
+<li>There are about 3 men for every 1 woman (4,277 men and 1,472 women) in the LFW dataset<a class="footnote_shim" name="[^lfw_www]_1"> </a><a href="#[^lfw_www]" class="footnote" title="Footnote 1">1</a></li>
<li>The person with the most images is <a href="http://vis-www.cs.umass.edu/lfw/person/George_W_Bush_comp.html">George W. Bush</a> with 530</li>
<li>There are about 3 George W. Bush's for every 1 <a href="http://vis-www.cs.umass.edu/lfw/person/Tony_Blair.html">Tony Blair</a></li>
<li>The LFW dataset includes over 500 actors, 30 models, 10 presidents, 124 basketball players, 24 football players, 11 kings, 7 queens, and 1 <a href="http://vis-www.cs.umass.edu/lfw/person/Moby.html">Moby</a></li>
@@ -39,7 +39,7 @@
<li>The word "future" appears 71 times</li>
</ul>
</div><h2>Labeled Faces in the Wild</h2>
-<p><em>Labeled Faces in The Wild</em> (LFW) is "a database of face photographs designed for studying the problem of unconstrained face recognition<a href="#[^lfw_www]" class="footnote" title="Footnote 1">1</a>. It is used to evaluate and improve the performance of facial recognition algorithms in academic, commercial, and government research. According to BiometricUpdate.com<a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a>, LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."</p>
+<p><em>Labeled Faces in The Wild</em> (LFW) is "a database of face photographs designed for studying the problem of unconstrained face recognition"<a class="footnote_shim" name="[^lfw_www]_2"> </a><a href="#[^lfw_www]" class="footnote" title="Footnote 1">1</a>. It is used to evaluate and improve the performance of facial recognition algorithms in academic, commercial, and government research. According to BiometricUpdate.com<a class="footnote_shim" name="[^lfw_pingan]_1"> </a><a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a>, LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."</p>
<p>The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. LFW is a subset of <em>Names and Faces</em> and is part of the first facial recognition training dataset created entirely from images appearing on the Internet. The people appearing in LFW are...</p>
<p>The <em>Names and Faces</em> dataset was the first face recognition dataset created entirely from online photos. However, neither <em>Names and Faces</em> nor <em>LFW</em> is the first face recognition dataset created entirely "in the wild". That title belongs to the <a href="/datasets/ucd_faces/">UCD dataset</a>. Obtaining images "in the wild" means using them without explicit consent or awareness from the subject or photographer.</p>
<h3>Biometric Trade Routes</h3>
@@ -51,11 +51,11 @@
</section><section class='applet_container'><div class='applet' data-payload='{"command": "citations"}'></div></section><section><h3>Additional Information</h3>
<p>(tweet-sized snippets go here)</p>
<ul>
-<li>The LFW dataset is considered the "most popular benchmark for face recognition" <a href="#[^lfw_baidu]" class="footnote" title="Footnote 2">2</a></li>
-<li>The LFW dataset is "the most widely used evaluation set in the field of facial recognition" <a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a></li>
+<li>The LFW dataset is considered the "most popular benchmark for face recognition" <a class="footnote_shim" name="[^lfw_baidu]_1"> </a><a href="#[^lfw_baidu]" class="footnote" title="Footnote 2">2</a></li>
+<li>The LFW dataset is "the most widely used evaluation set in the field of facial recognition" <a class="footnote_shim" name="[^lfw_pingan]_2"> </a><a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a></li>
<li>All images in the LFW dataset were obtained "in the wild", meaning without any consent from the subject or the photographer</li>
<li>The faces in the LFW dataset were detected using the Viola-Jones haarcascade face detector [^lfw_website] [^lfw-survey]</li>
-<li>The LFW dataset is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." <a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a></li>
+<li>The LFW dataset is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." <a class="footnote_shim" name="[^lfw_pingan]_3"> </a><a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a></li>
<li>All images in the LFW dataset were copied from Yahoo News between 2002 - 2004</li>
<li>In 2014, two of the four original authors of the LFW dataset received funding from IARPA and ODNI for their followup paper <a href="https://www.semanticscholar.org/paper/Labeled-Faces-in-the-Wild-%3A-Updates-and-New-Huang-Learned-Miller/2d3482dcff69c7417c7b933f22de606a0e8e42d4">Labeled Faces in the Wild: Updates and New Reporting Procedures</a> via IARPA contract number 2014-14071600010</li>
<li>The dataset includes 2 images of <a href="http://vis-www.cs.umass.edu/lfw/person/George_Tenet.html">George Tenet</a>, the former Director of Central Intelligence (DCI) for the Central Intelligence Agency whose facial biometrics were eventually used to help train facial recognition software in China and Russia</li>
@@ -94,9 +94,9 @@ imageio.imwrite(&#39;lfw_montage_960.jpg&#39;, montage)
</code></pre>
</section><section><h3>Supplementary Material</h3>
</section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file assets/lfw_commercial_use.csv", "fields": ["name_display, company_url, example_url, country, description"]}'></div></section><section><p>Text and graphics ©Adam Harvey / megapixels.cc</p>
-</section><section><ul class="footnotes"><li><a name="[^lfw_www]" class="footnote_anchor">^</a><p><a href="http://vis-www.cs.umass.edu/lfw/results.html">http://vis-www.cs.umass.edu/lfw/results.html</a></p>
-</li><li><a name="[^lfw_baidu]" class="footnote_anchor">^</a><p>Jingtuo Liu, Yafeng Deng, Tao Bai, Zhengping Wei, Chang Huang. Targeting Ultimate Accuracy: Face Recognition via Deep Embedding. <a href="https://arxiv.org/abs/1506.07310">https://arxiv.org/abs/1506.07310</a></p>
-</li><li><a name="[^lfw_pingan]" class="footnote_anchor">^</a><p>Lee, Justin. "PING AN Tech facial recognition receives high score in latest LFW test results". BiometricUpdate.com. Feb 13, 2017. <a href="https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results">https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results</a></p>
+</section><section><ul class="footnotes"><li><a name="[^lfw_www]" class="footnote_shim"></a><span class="backlinks"><a href="#[^lfw_www]_1">a</a><a href="#[^lfw_www]_2">b</a></span><p><a href="http://vis-www.cs.umass.edu/lfw/results.html">http://vis-www.cs.umass.edu/lfw/results.html</a></p>
+</li><li><a name="[^lfw_baidu]" class="footnote_shim"></a><span class="backlinks"><a href="#[^lfw_baidu]_1">a</a></span><p>Jingtuo Liu, Yafeng Deng, Tao Bai, Zhengping Wei, Chang Huang. Targeting Ultimate Accuracy: Face Recognition via Deep Embedding. <a href="https://arxiv.org/abs/1506.07310">https://arxiv.org/abs/1506.07310</a></p>
+</li><li><a name="[^lfw_pingan]" class="footnote_shim"></a><span class="backlinks"><a href="#[^lfw_pingan]_1">a</a><a href="#[^lfw_pingan]_2">b</a><a href="#[^lfw_pingan]_3">c</a></span><p>Lee, Justin. "PING AN Tech facial recognition receives high score in latest LFW test results". BiometricUpdate.com. Feb 13, 2017. <a href="https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results">https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results</a></p>
</li></ul></section>
</div>