author     Jules Laplace <julescarbon@gmail.com>    2019-02-27 23:48:35 +0100
committer  Jules Laplace <julescarbon@gmail.com>    2019-02-27 23:48:35 +0100
commit     1b008e4b4d11def9b13dc0a800b0d068624d43ae (patch)
tree       55c735998df54db3892a18b25814007a4d15d741
parent     421adbea75c5a4282630a7399f8b1018c4f0dd90 (diff)
half of a footnote implementation
-rw-r--r--  megapixels/app/site/parser.py        35
-rw-r--r--  site/assets/css/css.css              34
-rw-r--r--  site/public/datasets/lfw/index.html  15
3 files changed, 72 insertions, 12 deletions
diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py
index 98d9f284..ef83b655 100644
--- a/megapixels/app/site/parser.py
+++ b/megapixels/app/site/parser.py
@@ -18,6 +18,7 @@ def parse_markdown(metadata, sections, s3_path, skip_h1=False):
    current_group = []
    footnotes = []
    in_stats = False
+    in_footnotes = False
    ignoring = False
    if 'desc' in metadata and 'subdesc' in metadata:
@@ -33,6 +34,7 @@ def parse_markdown(metadata, sections, s3_path, skip_h1=False):
            continue
        elif section.strip().startswith('### Footnotes'):
            groups.append(format_section(current_group, s3_path))
+            current_group = []
            footnotes = []
            in_footnotes = True
        elif in_footnotes:
@@ -82,10 +84,18 @@ def parse_markdown(metadata, sections, s3_path, skip_h1=False):
            current_group.append(section)
    groups.append(format_section(current_group, s3_path))
+    footnote_txt = ''
+    footnote_lookup = {}
+
    if len(footnotes):
-        groups.append(format_footnotes(footnotes, s3_path))
+        footnote_txt, footnote_lookup = format_footnotes(footnotes, s3_path)
    content = "".join(groups)
+
+    if footnote_lookup:
+        for key, index in footnote_lookup.items():
+            content = content.replace(key, '<a href="#{}" class="footnote" title="Footnote {}">{}</a>'.format(key, index, index))
+        content += footnote_txt
    return content
@@ -153,8 +163,10 @@ def format_section(lines, s3_path, type='', tag='section'):
        return "<{}>{}</{}>".format(tag, markdown(lines), tag)
    return ""
-
def fix_meta(lines):
+    """
+    Format metadata sections before passing to markdown
+    """
    new_lines = []
    for line in lines:
        if line.startswith('+ '):
@@ -162,7 +174,6 @@ def fix_meta(lines):
        new_lines.append(line)
    return new_lines
-
def format_metadata(section):
    """
    format a metadata section (+ key: value pairs)
@@ -173,12 +184,24 @@ def format_metadata(section):
        meta.append("<div><div class='gray'>{}</div><div>{}</div></div>".format(key, value))
    return "<div class='meta'>{}</div>".format(''.join(meta))
-def format_footnotes(footnotes):
+def format_footnotes(footnotes, s3_path):
+    """
+    Format the footnotes section separately and produce a lookup we can use to update the main site
+    """
    footnotes = '\n'.join(footnotes).split('\n')
+    index = 1
+    footnote_index_lookup = {}
+    footnote_list = []
    for footnote in footnotes:
        if not len(footnote) or '[^' not in footnote:
            continue
-        key, footnote = footnotes.split(': ')
+        key, note = footnote.split(': ', 1)
+        footnote_index_lookup[key] = index
+        footnote_list.append('<a name="{}" class="footnote_anchor">^</a>'.format(key) + markdown(note))
+        index += 1
+
+    footnote_txt = '<section><ul class="footnotes"><li>' + '</li><li>'.join(footnote_list) + '</li></ul></section>'
+    return footnote_txt, footnote_index_lookup
def format_applet(section, s3_path):
"""
@@ -189,7 +212,7 @@ def format_applet(section, s3_path):
    applet = {}
    # print(payload)
    if ': ' in payload[0]:
-        command, opt = payload[0].split(': ')
+        command, opt = payload[0].split(': ', 1)
    else:
        command = payload[0]
        opt = None
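
The rewrite step above is easiest to see end to end. Below is a minimal sketch of how the lookup returned by format_footnotes drives the inline replacement in parse_markdown; the sample content and footnote key are made up for illustration and are not part of the patch:

    # Sketch: rewriting inline [^key] markers with the footnote lookup.
    # `content` and `footnote_lookup` below are illustrative stand-ins for the
    # values parse_markdown builds from the markdown sections.
    content = '<p>...the most widely used face dataset[^lfw_www] for benchmarking...</p>'
    footnote_lookup = {'[^lfw_www]': 1}

    for key, index in footnote_lookup.items():
        content = content.replace(
            key,
            '<a href="#{}" class="footnote" title="Footnote {}">{}</a>'.format(key, index, index))

    print(content)
    # <p>...the most widely used face dataset<a href="#[^lfw_www]" class="footnote"
    #   title="Footnote 1">1</a> for benchmarking...</p>

Note that the href target keeps the raw [^lfw_www] key, which matches the name attributes of the footnote anchors emitted into the generated lfw/index.html further down.
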
diff --git a/site/assets/css/css.css b/site/assets/css/css.css
index fed381a7..8b4241ea 100644
--- a/site/assets/css/css.css
+++ b/site/assets/css/css.css
@@ -548,4 +548,38 @@ section.intro_section {
.intro_section span {
box-shadow: -10px -10px #000, 10px -10px #000, 10px 10px #000, -10px 10px #000;
background: #000;
+}
+
+/* footnotes */
+
+a.footnote {
+ font-size: 10px;
+ position: relative;
+ display: inline-block;
+ bottom: 10px;
+ text-decoration: none;
+ color: #ff0;
+ left: 2px;
+}
+.right-sidebar a.footnote {
+ bottom: 8px;
+}
+.desktop a.footnote:hover {
+ background-color: #ff0;
+ color: #000;
+}
+a.footnote_anchor {
+ font-weight: bold;
+ color: #ff0;
+ margin-right: 10px;
+ text-decoration: underline;
+ cursor: pointer;
+}
+ul.footnotes {
+ list-style-type: decimal;
+ margin-left: 30px;
+}
+li p {
+ margin: 0; padding: 0;
+ display: inline;
+}
\ No newline at end of file
diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html
index 1242df0c..54b6aa22 100644
--- a/site/public/datasets/lfw/index.html
+++ b/site/public/datasets/lfw/index.html
@@ -31,7 +31,7 @@
</span></div></div></section><section><div class='image'><div class='caption'>Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.</div></div></section><section><div class='right-sidebar'><h3>Statistics</h3>
<div class='meta'><div><div class='gray'>Years</div><div>2002-2004</div></div><div><div class='gray'>Images</div><div>13,233</div></div><div><div class='gray'>Identities</div><div>5,749</div></div><div><div class='gray'>Origin</div><div>Yahoo News Images</div></div><div><div class='gray'>Funding</div><div>(Possibly, partially CIA)</div></div></div><h3>INSIGHTS</h3>
<ul>
-<li>There are about 3 men for every 1 woman (4,277 men and 1,472 women) in the LFW dataset[^lfw_www]</li>
+<li>There are about 3 men for every 1 woman (4,277 men and 1,472 women) in the LFW dataset<a href="#[^lfw_www]" class="footnote" title="Footnote 1">1</a></li>
<li>The person with the most images is <a href="http://vis-www.cs.umass.edu/lfw/person/George_W_Bush_comp.html">George W. Bush</a> with 530</li>
<li>There are about 3 George W. Bush's for every 1 <a href="http://vis-www.cs.umass.edu/lfw/person/Tony_Blair.html">Tony Blair</a></li>
<li>The LFW dataset includes over 500 actors, 30 models, 10 presidents, 124 basketball players, 24 football players, 11 kings, 7 queens, and 1 <a href="http://vis-www.cs.umass.edu/lfw/person/Moby.html">Moby</a></li>
@@ -39,7 +39,7 @@
<li>The word "future" appears 71 times</li>
</ul>
</div><h2>Labeled Faces in the Wild</h2>
-<p><em>Labeled Faces in The Wild</em> (LFW) is "a database of face photographs designed for studying the problem of unconstrained face recognition[^lfw_www]. It is used to evaluate and improve the performance of facial recognition algorithms in academic, commercial, and government research. According to BiometricUpdate.com[^lfw_pingan], LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."</p>
+<p><em>Labeled Faces in The Wild</em> (LFW) is "a database of face photographs designed for studying the problem of unconstrained face recognition<a href="#[^lfw_www]" class="footnote" title="Footnote 1">1</a>. It is used to evaluate and improve the performance of facial recognition algorithms in academic, commercial, and government research. According to BiometricUpdate.com<a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a>, LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."</p>
<p>The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. LFW is a subset of <em>Names of Faces</em> and is part of the first facial recognition training dataset created entirely from images appearing on the Internet. The people appearing in LFW are...</p>
<p>The <em>Names and Faces</em> dataset was the first face recognition dataset created entire from online photos. However, <em>Names and Faces</em> and <em>LFW</em> are not the first face recognition dataset created entirely "in the wild". That title belongs to the <a href="/datasets/ucd_faces/">UCD dataset</a>. Images obtained "in the wild" means using an image without explicit consent or awareness from the subject or photographer.</p>
<h3>Biometric Trade Routes</h3>
@@ -51,11 +51,11 @@
</section><section class='applet_container'><div class='applet' data-payload='{"command": "citations"}'></div></section><section><h3>Additional Information</h3>
<p>(tweet-sized snippets go here)</p>
<ul>
-<li>The LFW dataset is considered the "most popular benchmark for face recognition" [^lfw_baidu]</li>
-<li>The LFW dataset is "the most widely used evaluation set in the field of facial recognition" [^lfw_pingan]</li>
+<li>The LFW dataset is considered the "most popular benchmark for face recognition" <a href="#[^lfw_baidu]" class="footnote" title="Footnote 2">2</a></li>
+<li>The LFW dataset is "the most widely used evaluation set in the field of facial recognition" <a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a></li>
<li>All images in LFW dataset were obtained "in the wild" meaning without any consent from the subject or from the photographer</li>
<li>The faces in the LFW dataset were detected using the Viola-Jones haarcascade face detector [^lfw_website] [^lfw-survey]</li>
-<li>The LFW dataset is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." [^lfw_pingan]</li>
+<li>The LFW dataset is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." <a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a></li>
<li>All images in the LFW dataset were copied from Yahoo News between 2002 - 2004</li>
<li>In 2014, two of the four original authors of the LFW dataset received funding from IARPA and ODNI for their followup paper <a href="https://www.semanticscholar.org/paper/Labeled-Faces-in-the-Wild-%3A-Updates-and-New-Huang-Learned-Miller/2d3482dcff69c7417c7b933f22de606a0e8e42d4">Labeled Faces in the Wild: Updates and New Reporting Procedures</a> via IARPA contract number 2014-14071600010</li>
<li>The dataset includes 2 images of <a href="http://vis-www.cs.umass.edu/lfw/person/George_Tenet.html">George Tenet</a>, the former Director of Central Intelligence (DCI) for the Central Intelligence Agency whose facial biometrics were eventually used to help train facial recognition software in China and Russia</li>
@@ -94,7 +94,10 @@ imageio.imwrite(&#39;lfw_montage_960.jpg&#39;, montage)
</code></pre>
</section><section><h3>Supplementary Material</h3>
</section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file assets/lfw_commercial_use.csv", "fields": ["name_display, company_url, example_url, country, description"]}'></div></section><section><p>Text and graphics ©Adam Harvey / megapixels.cc</p>
-</section>
+</section><section><ul class="footnotes"><li><a name="[^lfw_www]" class="footnote_anchor">^</a><p><a href="http://vis-www.cs.umass.edu/lfw/results.html">http://vis-www.cs.umass.edu/lfw/results.html</a></p>
+</li><li><a name="[^lfw_baidu]" class="footnote_anchor">^</a><p>Jingtuo Liu, Yafeng Deng, Tao Bai, Zhengping Wei, Chang Huang. Targeting Ultimate Accuracy: Face Recognition via Deep Embedding. <a href="https://arxiv.org/abs/1506.07310">https://arxiv.org/abs/1506.07310</a></p>
+</li><li><a name="[^lfw_pingan]" class="footnote_anchor">^</a><p>Lee, Justin. "PING AN Tech facial recognition receives high score in latest LFW test results". BiometricUpdate.com. Feb 13, 2017. <a href="https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results">https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results</a></p>
+</li></ul></section>
</div>
<footer>