From c8e7a10be948c2405d46d8c3caf4a8c6675eee29 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Wed, 27 Feb 2019 19:35:54 +0100 Subject: rebuild --- client/map/index.js | 1 + 1 file changed, 1 insertion(+) (limited to 'client') diff --git a/client/map/index.js b/client/map/index.js index 2a6686be..d38855bf 100644 --- a/client/map/index.js +++ b/client/map/index.js @@ -78,6 +78,7 @@ export default function append(el, payload) { source = [address.lat, address.lng].map(n => parseFloat(n)) } + // ....i dont think the sort order does anything?? citations.sort((a,b) => sortOrder.indexOf(a) - sortOrder.indexOf(b)) .forEach(citation => { const address = citation.addresses[0] -- cgit v1.2.3-70-g09d2 From 9bac173e85865e4f0d1dba5071b40eb7ebe3dd1a Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Wed, 27 Feb 2019 22:15:03 +0100 Subject: new intro header for datasets page and sidebar --- client/index.js | 6 +-- megapixels/app/site/parser.py | 70 ++++++++++++++++++++++++++---- megapixels/commands/site/watch.py | 2 + site/assets/css/css.css | 72 ++++++++++++++++++++++++++----- site/assets/css/tabulator.css | 2 +- site/content/pages/datasets/lfw/index.md | 25 +++++------ site/content/pages/datasets/uccs/index.md | 2 +- site/public/datasets/lfw/index.html | 36 ++++------------ 8 files changed, 152 insertions(+), 63 deletions(-) (limited to 'client') diff --git a/client/index.js b/client/index.js index c9335f14..37906f30 100644 --- a/client/index.js +++ b/client/index.js @@ -110,9 +110,9 @@ function runApplets() { function main() { const paras = document.querySelectorAll('section p') - if (paras.length) { - paras[0].classList.add('first_paragraph') - } + // if (paras.length) { + // paras[0].classList.add('first_paragraph') + // } toArray(document.querySelectorAll('header .links a')).forEach(tag => { if (window.location.href.match(tag.href)) { tag.classList.add('active') diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py index 3792e6f1..dc53177b 100644 --- a/megapixels/app/site/parser.py +++ b/megapixels/app/site/parser.py @@ -16,9 +16,30 @@ def parse_markdown(metadata, sections, s3_path, skip_h1=False): """ groups = [] current_group = [] + in_stats = False + + if 'desc' in metadata and 'subdesc' in metadata: + groups.append(intro_section(metadata, s3_path)) + for section in sections: if skip_h1 and section.startswith('# '): continue + elif section.strip().startswith('---'): + continue + elif section.lower().strip().startswith('ignore text'): + break + elif '### Statistics' in section: + if len(current_group): + groups.append(format_section(current_group, s3_path)) + current_group = [] + current_group.append(section) + in_stats = True + elif in_stats and not section.strip().startswith('## '): + current_group.append(section) + elif in_stats and section.strip().startswith('## '): + current_group = [format_section(current_group, s3_path, 'right-sidebar', tag='div')] + current_group.append(section) + in_stats = False elif section.strip().startswith('```'): groups.append(format_section(current_group, s3_path)) current_group = [] @@ -32,7 +53,7 @@ def parse_markdown(metadata, sections, s3_path, skip_h1=False): current_group = [] elif section.startswith('+ '): groups.append(format_section(current_group, s3_path)) - groups.append(format_metadata(section)) + groups.append('
' + format_metadata(section) + '
') current_group = [] elif '![fullwidth:' in section: groups.append(format_section(current_group, s3_path)) @@ -52,6 +73,32 @@ def parse_markdown(metadata, sections, s3_path, skip_h1=False): content = "".join(groups) return content +def intro_section(metadata, s3_path): + """ + Build the intro section for datasets + """ + + section = "
".format(s3_path + metadata['image']) + section += "
" + + parts = [] + if 'desc' in metadata: + desc = metadata['desc'] + if 'color' in metadata and metadata['title'] in desc: + desc = desc.replace(metadata['title'], "{}".format(metadata['color'], metadata['title'])) + section += "
{}
".format(desc, desc) + + if 'subdesc' in metadata: + subdesc = markdown(metadata['subdesc']).replace('

', '').replace('

', '') + section += "
{}
".format(subdesc, subdesc) + + section += "
" + section += "
" + + if 'caption' in metadata: + section += "
{}
".format(metadata['caption']) + + return section def fix_images(lines, s3_path): """ @@ -75,19 +122,26 @@ def fix_images(lines, s3_path): real_lines.append(line) return "\n".join(real_lines) - -def format_section(lines, s3_path, type=''): +def format_section(lines, s3_path, type='', tag='section'): """ format a normal markdown section """ if len(lines): + lines = fix_meta(lines) lines = fix_images(lines, s3_path) if type: - return "
{}
".format(type, markdown(lines)) + return "<{} class='{}'>{}".format(tag, type, markdown(lines), tag) else: - return "
" + markdown(lines) + "
" + return "<{}>{}".format(tag, markdown(lines), tag) return "" +def fix_meta(lines): + new_lines = [] + for line in lines: + if line.startswith('+ '): + line = format_metadata(line) + new_lines.append(line) + return new_lines def format_metadata(section): """ @@ -97,8 +151,7 @@ def format_metadata(section): for line in section.split('\n'): key, value = line[2:].split(': ', 1) meta.append("
{}
{}
".format(key, value)) - return "
{}
".format(''.join(meta)) - + return "
{}
".format(''.join(meta)) def format_applet(section, s3_path): """ @@ -107,12 +160,13 @@ def format_applet(section, s3_path): # print(section) payload = section.strip('```').strip().strip('```').strip().split('\n') applet = {} - print(payload) + # print(payload) if ': ' in payload[0]: command, opt = payload[0].split(': ') else: command = payload[0] opt = None + print(command) if command == 'python' or command == 'javascript' or command == 'code': return format_section([ section ], s3_path) if command == '': diff --git a/megapixels/commands/site/watch.py b/megapixels/commands/site/watch.py index 7fd3ba7c..7bd71038 100644 --- a/megapixels/commands/site/watch.py +++ b/megapixels/commands/site/watch.py @@ -35,6 +35,8 @@ def cli(ctx): observer.schedule(SiteBuilder(), path=cfg.DIR_SITE_CONTENT, recursive=True) observer.start() + build_file(cfg.DIR_SITE_CONTENT + "/datasets/lfw/index.md") + try: while True: time.sleep(1) diff --git a/site/assets/css/css.css b/site/assets/css/css.css index 7b2e19fc..fed381a7 100644 --- a/site/assets/css/css.css +++ b/site/assets/css/css.css @@ -4,12 +4,12 @@ html, body { padding: 0; width: 100%; min-height: 100%; - font-family: 'Roboto', sans-serif; - color: #b8b8b8; + font-family: 'Roboto Mono', sans-serif; + color: #eee; overflow-x: hidden; } html { - background: #191919; + background: #111111; } .content { @@ -146,8 +146,8 @@ h2 { h3 { margin: 0 0 20px 0; padding: 0; - font-size: 11pt; - font-weight: 500; + font-size: 14pt; + font-weight: 600; transition: color 0.2s cubic-bezier(0,0,1,1); } h4 { @@ -165,8 +165,15 @@ h4 { color: #fff; text-decoration: underline; } +.right-sidebar h3 { + margin: 0; + padding: 0 0 10px 0; + font-family: 'Roboto Mono'; + text-transform: uppercase; + letter-spacing: 2px; +} -th, .gray, h3, h4 { +th, .gray { font-family: 'Roboto Mono', monospace; font-weight: 400; text-transform: uppercase; @@ -201,6 +208,7 @@ section { } p { margin: 0 0 20px 0; + line-height: 2; } .content a { color: #ddd; @@ -229,10 +237,13 @@ p { } .right-sidebar { float: right; - width: 200px; + width: 240px; margin-left: 20px; + padding-top: 10px; padding-left: 20px; border-left: 1px solid #444; + font-family: 'Roboto'; + font-size: 14px; } .right-sidebar .meta { flex-direction: column; @@ -240,6 +251,9 @@ p { .right-sidebar .meta > div { margin-bottom: 10px; } +.right-sidebar ul { + margin-bottom: 10px; +} /* lists */ @@ -346,17 +360,17 @@ section.wide .image { } section.fullwidth { width: 100%; - background-size: contain; } section.fullwidth .image { max-width: 100%; } .caption { - text-align: center; + text-align: left; font-size: 9pt; - color: #888; - max-width: 620px; + color: #bbb; + max-width: 960px; margin: 10px auto 0 auto; + font-family: 'Roboto'; } /* blog index */ @@ -499,3 +513,39 @@ section.fullwidth .image { .dataset-list a:nth-child(3n+3) { background-color: rgba(255, 255, 0, 0.1); } .desktop .dataset-list .dataset:nth-child(3n+3):hover { background-color: rgba(255, 255, 0, 0.2); } + + +/* intro section for datasets */ + +section.intro_section { + font-family: 'Roboto Mono'; + width: 100%; + background-size: cover; + background-position: bottom left; + padding: 50px 0; + min-height: 60vh; + display: flex; + justify-content: center; + align-items: center; + background-color: #111111; +} +.intro_section .inner { + max-width: 960px; + margin: 0 auto; +} +.intro_section .hero_desc { + font-size: 38px; + line-height: 60px; + margin-bottom: 30px; + color: #fff; +} +.intro_section .hero_subdesc { + font-size: 18px; + line-height: 36px; + max-width: 640px; 
+ color: #ddd; +} +.intro_section span { + box-shadow: -10px -10px #000, 10px -10px #000, 10px 10px #000, -10px 10px #000; + background: #000; +} \ No newline at end of file diff --git a/site/assets/css/tabulator.css b/site/assets/css/tabulator.css index 200f0c5c..63abf050 100755 --- a/site/assets/css/tabulator.css +++ b/site/assets/css/tabulator.css @@ -493,7 +493,7 @@ display: inline-block; position: relative; box-sizing: border-box; - padding: 4px; + padding: 10px; border-right: 1px solid #333; vertical-align: middle; white-space: nowrap; diff --git a/site/content/pages/datasets/lfw/index.md b/site/content/pages/datasets/lfw/index.md index 48d86e1f..1995e1f9 100644 --- a/site/content/pages/datasets/lfw/index.md +++ b/site/content/pages/datasets/lfw/index.md @@ -2,14 +2,14 @@ status: published title: Labeled Faces in The Wild -desc: Labeled Faces in The Wild (LFW) is a database of face photographs designed for studying the problem of unconstrained face recognition +desc: Labeled Faces in The Wild (LFW) is a database of face photographs designed for studying the problem of unconstrained face recognition. subdesc: It includes 13,456 images of 4,432 people’s images copied from the Internet during 2002-2004. -image: lfw_index.gif +image: assets/lfw_feature.jpg caption: Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms. slug: lfw published: 2019-2-23 updated: 2019-2-23 -color: #00FF00 +color: #ff0000 authors: Adam Harvey ------------ @@ -22,12 +22,11 @@ authors: Adam Harvey + Origin: Yahoo News Images + Funding: (Possibly, partially CIA) -### Analysis +### INSIGHTS - There are about 3 men for every 1 woman (4,277 men and 1,472 women) in the LFW dataset[^lfw_www] - The person with the most images is [George W. Bush](http://vis-www.cs.umass.edu/lfw/person/George_W_Bush_comp.html) with 530 - There are about 3 George W. Bush's for every 1 [Tony Blair](http://vis-www.cs.umass.edu/lfw/person/Tony_Blair.html) -- 70% of people in the dataset have only 1 image and 29% have 2 or more images - The LFW dataset includes over 500 actors, 30 models, 10 presidents, 124 basketball players, 24 football players, 11 kings, 7 queens, and 1 [Moby](http://vis-www.cs.umass.edu/lfw/person/Moby.html) - In all 3 of the LFW publications [^lfw_original_paper], [^lfw_survey], [^lfw_tech_report] the words "ethics", "consent", and "privacy" appear 0 times - The word "future" appears 71 times @@ -40,20 +39,20 @@ The LFW dataset includes 13,233 images of 5,749 people that were collected betwe The *Names and Faces* dataset was the first face recognition dataset created entire from online photos. However, *Names and Faces* and *LFW* are not the first face recognition dataset created entirely "in the wild". That title belongs to the [UCD dataset](/datasets/ucd_faces/). Images obtained "in the wild" means using an image without explicit consent or awareness from the subject or photographer. -### Synthetic Faces - -To visualize the types of photos in the dataset without explicitly publishing individual's identities a generative adversarial network (GAN) was trained on the entire dataset. The images in this video show a neural network learning the visual latent space and then interpolating between archetypical identities within the LFW dataset. 
- -![fullwidth:](assets/lfw_synthetic.jpg) - ### Biometric Trade Routes -To understand how this dataset has been used, its citations have been geocoded to show an approximate geographic digital trade route of the biometric data. Lines indicate an organization (education, commercial, or governmental) that has cited the LFW dataset in their research. Data is compiled from [SemanticScholar](https://www.semanticscholar.org). +To understand how this dataset has been used, its citations have been geocoded to show an approximate geographic digital trade route of the biometric data. Lines indicate an organization (education, commercial, or governmental) that has cited the LFW dataset in their research. Data is compiled from [Semantic Scholar](https://www.semanticscholar.org). ``` map ``` +### Synthetic Faces + +To visualize the types of photos in the dataset without explicitly publishing individual's identities a generative adversarial network (GAN) was trained on the entire dataset. The images in this video show a neural network learning the visual latent space and then interpolating between archetypical identities within the LFW dataset. + +![fullwidth:](assets/lfw_synthetic.jpg) + ### Citations Browse or download the geocoded citation data collected for the LFW dataset. @@ -136,6 +135,7 @@ Ignore text below these lines ------- + ### Research - "In our experiments, we used 10000 images and associated captions from the Faces in the wilddata set [3]." @@ -146,6 +146,7 @@ Ignore text below these lines - This research is based upon work supported in part by the Office of the Director of National Intelligence (ODNI), Intelligence Advanced Research Projects Activity (IARPA), via contract number 2014-14071600010. - From "Labeled Faces in the Wild: Updates and New Reporting Procedures" +- 70% of people in the dataset have only 1 image and 29% have 2 or more images ### Footnotes diff --git a/site/content/pages/datasets/uccs/index.md b/site/content/pages/datasets/uccs/index.md index d40dce22..be1d2474 100644 --- a/site/content/pages/datasets/uccs/index.md +++ b/site/content/pages/datasets/uccs/index.md @@ -68,7 +68,7 @@ The more recent UCCS version of the dataset received funding from [^funding_uccs - You are welcomed to use these images for academic and journalistic use including for research papers, news stories, presentations. - Please use the following citation: -```MegaPixels.cc Adam Harvey 2013-2109.``` +```MegaPixels.cc Adam Harvey 2013-2019.``` [^funding_sb]: Sapkota, Archana and Boult, Terrance. "Large Scale Unconstrained Open Set Face Database." 2013. [^funding_uccs]: Günther, M. et. al. "Unconstrained Face Detection and Open-Set Face Recognition Challenge," 2018. Arxiv 1708.02337v3. \ No newline at end of file diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html index 86f49c52..1242df0c 100644 --- a/site/public/datasets/lfw/index.html +++ b/site/public/datasets/lfw/index.html @@ -4,7 +4,7 @@ MegaPixels - + @@ -27,26 +27,26 @@
-

Statistics

-
Years
2002-2004
Images
13,233
Identities
5,749
Origin
Yahoo News Images
Funding
(Possibly, partially CIA)

Analysis

+
Labeled Faces in The Wild (LFW) is a database of face photographs designed for studying the problem of unconstrained face recognition.
It includes 13,233 images of 5,749 people, copied from the Internet during 2002-2004. +
Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.

Labeled Faces in the Wild

Labeled Faces in The Wild (LFW) is "a database of face photographs designed for studying the problem of unconstrained face recognition[^lfw_www]. It is used to evaluate and improve the performance of facial recognition algorithms in academic, commercial, and government research. According to BiometricUpdate.com[^lfw_pingan], LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."

The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. LFW is a subset of Names and Faces and is part of the first facial recognition training dataset created entirely from images appearing on the Internet. The people appearing in LFW are...

The Names and Faces dataset was the first face recognition dataset created entirely from online photos. However, Names and Faces and LFW are not the first face recognition datasets created entirely "in the wild". That title belongs to the UCD dataset. Images obtained "in the wild" means using an image without explicit consent or awareness from the subject or photographer.

-

Synthetic Faces

+

Biometric Trade Routes

+

To understand how this dataset has been used, its citations have been geocoded to show an approximate geographic digital trade route of the biometric data. Lines indicate an organization (education, commercial, or governmental) that has cited the LFW dataset in their research. Data is compiled from Semantic Scholar.

+

Synthetic Faces

To visualize the types of photos in the dataset without explicitly publishing individuals' identities, a generative adversarial network (GAN) was trained on the entire dataset. The images in this video show a neural network learning the visual latent space and then interpolating between archetypical identities within the LFW dataset.

-

Biometric Trade Routes

-

To understand how this dataset has been used, its citations have been geocoded to show an approximate geographic digital trade route of the biometric data. Lines indicate an organization (education, commercial, or governmental) that has cited the LFW dataset in their research. Data is compiled from SemanticScholar.

-

Citations

+

Citations

Browse or download the geocoded citation data collected for the LFW dataset.

Additional Information

(tweet-sized snippets go here)

@@ -94,24 +94,6 @@ imageio.imwrite('lfw_montage_960.jpg', montage)

Supplementary Material

Text and graphics ©Adam Harvey / megapixels.cc

-

Ignore text below these lines

-

Research

-
    -
  • "In our experiments, we used 10000 images and associated captions from the Faces in the wilddata set [3]."
  • -
  • "This work was supported in part by the Center for Intelligent Information Retrieval, the Central Intelligence Agency, the National Security Agency and National Science Foundation under CAREER award IIS-0546666 and grant IIS-0326249."
  • -
  • From: "People-LDA: Anchoring Topics to People using Face Recognition" https://www.semanticscholar.org/paper/People-LDA%3A-Anchoring-Topics-to-People-using-Face-Jain-Learned-Miller/10f17534dba06af1ddab96c4188a9c98a020a459 and https://ieeexplore.ieee.org/document/4409055
  • -
  • This paper was presented at IEEE 11th ICCV conference Oct 14-21 and the main LFW paper "Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments" was also published that same year
  • -
  • 10f17534dba06af1ddab96c4188a9c98a020a459

    -
  • -
  • This research is based upon work supported in part by the Office of the Director of National Intelligence (ODNI), Intelligence Advanced Research Projects Activity (IARPA), via contract number 2014-14071600010.

    -
  • -
  • From "Labeled Faces in the Wild: Updates and New Reporting Procedures"
  • -
-

Footnotes

-
-
-
    -
    -- cgit v1.2.3-70-g09d2 From eaf8a163ed3dc15b624188dcf8ae7216b801d73e Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Thu, 28 Feb 2019 00:01:25 +0100 Subject: weird --- client/tables.js | 1 + 1 file changed, 1 insertion(+) (limited to 'client') diff --git a/client/tables.js b/client/tables.js index 70ab5971..2f4214e1 100644 --- a/client/tables.js +++ b/client/tables.js @@ -65,6 +65,7 @@ export default function append(el, payload) { .then(r => r.text()) .then(text => { try { + console.log(text) const data = csv.toJSON(text, { headers: { included: true } }) // console.log(data) table.setData(data) -- cgit v1.2.3-70-g09d2 From 6711fb0c58e969284e3fcf94bb163c77445e2e13 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Thu, 28 Feb 2019 15:56:04 +0100 Subject: footnote back and forth navigation --- client/util/index.js | 4 ++ megapixels/app/site/parser.py | 17 +++++++- site/assets/css/css.css | 66 +++++++++++++++++++++++--------- site/content/pages/datasets/lfw/index.md | 2 +- site/public/datasets/lfw/index.html | 18 ++++----- 5 files changed, 77 insertions(+), 30 deletions(-) (limited to 'client') diff --git a/client/util/index.js b/client/util/index.js index d0db0d98..0792e24e 100644 --- a/client/util/index.js +++ b/client/util/index.js @@ -5,12 +5,16 @@ export const isiPad = !!(navigator.userAgent.match(/iPad/i)) export const isAndroid = !!(navigator.userAgent.match(/Android/i)) export const isMobile = isiPhone || isiPad || isAndroid export const isDesktop = !isMobile +export const isFirefox = typeof InstallTrigger !== 'undefined' export const toArray = a => Array.prototype.slice.apply(a) export const choice = a => a[Math.floor(Math.random() * a.length)] const htmlClassList = document.body.parentNode.classList htmlClassList.add(isDesktop ? 'desktop' : 'mobile') +if (isFirefox) { + htmlClassList.add('firefox') +} /* Default image dimensions */ diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py index ef83b655..9e904e00 100644 --- a/megapixels/app/site/parser.py +++ b/megapixels/app/site/parser.py @@ -10,6 +10,8 @@ import app.site.s3 as s3 renderer = mistune.Renderer(escape=False) markdown = mistune.Markdown(renderer=renderer) +footnote_count = 0 + def parse_markdown(metadata, sections, s3_path, skip_h1=False): """ parse page into sections, preprocess the markdown to handle our modifications @@ -94,7 +96,18 @@ def parse_markdown(metadata, sections, s3_path, skip_h1=False): if footnote_lookup: for key, index in footnote_lookup.items(): - content = content.replace(key, '{}'.format(key, index, index)) + global footnote_count + footnote_count = 0 + letters = "abcdefghijklmnopqrstuvwxyz" + footnote_backlinks = [] + def footnote_tag(match): + global footnote_count + footnote_count += 1 + footnote_backlinks.append('{}'.format(key, footnote_count, letters[footnote_count-1])) + return ' {}'.format(key, footnote_count, key, index, index) + key_regex = re.compile(key.replace('[', '\\[').replace('^', '\\^').replace(']', '\\]')) + content = key_regex.sub(footnote_tag, content) + footnote_txt = footnote_txt.replace("{}_BACKLINKS".format(index), "".join(footnote_backlinks)) content += footnote_txt return content @@ -197,7 +210,7 @@ def format_footnotes(footnotes, s3_path): continue key, note = footnote.split(': ', 1) footnote_index_lookup[key] = index - footnote_list.append('^'.format(key) + markdown(note)) + footnote_list.append('{}_BACKLINKS'.format(key, index) + markdown(note)) index += 1 footnote_txt = '
    • ' + '
    • '.join(footnote_list) + '
    ' diff --git a/site/assets/css/css.css b/site/assets/css/css.css index 0afa3725..4b42657b 100644 --- a/site/assets/css/css.css +++ b/site/assets/css/css.css @@ -16,7 +16,8 @@ html { opacity: 0; transition: opacity 0.2s cubic-bezier(0,1,1,1); } -html.desktop .content, html.mobile .content { +html.desktop .content, +html.mobile .content { opacity: 1; } @@ -28,7 +29,7 @@ header { left: 0; width: 100%; height: 70px; - z-index: 2; + z-index: 9999; background: #1e1e1e; display: flex; flex-direction: row; @@ -53,8 +54,10 @@ header .logo { height: 30px; } header .site_name { + font-family: 'Roboto', sans-serif; font-weight: bold; color: #fff; + font-size: 14px; } header .sub { margin-left: 4px; @@ -148,7 +151,7 @@ h3 { margin: 0 0 20px 0; padding: 0; font-size: 14pt; - font-weight: 600; + font-weight: 500; transition: color 0.2s cubic-bezier(0,0,1,1); } h4 { @@ -170,6 +173,8 @@ h4 { margin: 0; padding: 0 0 10px 0; font-family: 'Roboto Mono'; + font-weight: 400; + font-size: 11px; text-transform: uppercase; letter-spacing: 2px; } @@ -210,13 +215,17 @@ section { p { margin: 0 0 20px 0; line-height: 2; + font-size: 15px; + font-weight: 400; } .content a { - color: #ff0; + color: #fff; + text-decoration: none; + border-bottom: 1px dashed; transition: color 0.2s cubic-bezier(0,0,1,1); } -.content a:hover { - color: #fff; +.desktop .content a:hover { + color: #ff8; } /* top of post metadata */ @@ -368,7 +377,7 @@ section.fullwidth .image { .caption { text-align: left; font-size: 9pt; - color: #bbb; + color: #999; max-width: 960px; margin: 10px auto 0 auto; font-family: 'Roboto'; @@ -538,17 +547,22 @@ section.intro_section { font-size: 38px; line-height: 60px; margin-bottom: 30px; - color: #fff; + color: #ddd; + font-weight: 300; } .intro_section .hero_subdesc { font-size: 18px; line-height: 36px; max-width: 640px; + font-weight: 300; color: #ddd; } -.intro_section span { - box-shadow: -10px -10px #000, 10px -10px #000, 10px 10px #000, -10px 10px #000; - background: #000; +.intro_section div > span { + box-shadow: -10px -10px #1e1e1e, 10px -10px #1e1e1e, 10px 10px #1e1e1e, -10px 10px #1e1e1e; + background: #1e1e1e; +} +.firefox .intro_section div > span { + box-decoration-break: clone; } /* footnotes */ @@ -559,22 +573,38 @@ a.footnote { display: inline-block; bottom: 10px; text-decoration: none; - color: #ff0; + color: #ff8; + border: 0; left: 2px; + transition-duration: 0s; +} +a.footnote_shim { + display: inline-block; + width: 1px; height: 1px; + overflow: hidden; + position: relative; + top: -90px; + visibility: hidden; } .right-sidebar a.footnote { bottom: 8px; } .desktop a.footnote:hover { - background-color: #ff0; + background-color: #ff8; color: #000; } -a.footnote_anchor { - font-weight: bold; - color: #ff0; +.backlinks { margin-right: 10px; - text-decoration: underline; - cursor: pointer; +} +.content .backlinks a { + color: #ff8; + font-size: 10px; + text-decoration: none; + border: 0; + font-weight: bold; + position: relative; + bottom: 5px; + margin-right: 2px; } ul.footnotes { list-style-type: decimal; diff --git a/site/content/pages/datasets/lfw/index.md b/site/content/pages/datasets/lfw/index.md index 1995e1f9..972fafe2 100644 --- a/site/content/pages/datasets/lfw/index.md +++ b/site/content/pages/datasets/lfw/index.md @@ -5,7 +5,7 @@ title: Labeled Faces in The Wild desc: Labeled Faces in The Wild (LFW) is a database of face photographs designed for studying the problem of unconstrained face recognition. 
subdesc: It includes 13,456 images of 4,432 people’s images copied from the Internet during 2002-2004. image: assets/lfw_feature.jpg -caption: Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms. +caption: A few of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms. slug: lfw published: 2019-2-23 updated: 2019-2-23 diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html index 54b6aa22..08ec8ee3 100644 --- a/site/public/datasets/lfw/index.html +++ b/site/public/datasets/lfw/index.html @@ -28,10 +28,10 @@
    Labeled Faces in The Wild (LFW) is a database of face photographs designed for studying the problem of unconstrained face recognition.
It includes 13,233 images of 5,749 people, copied from the Internet during 2002-2004. -
    Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.
    A few of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.

    Labeled Faces in the Wild

    -

    Labeled Faces in The Wild (LFW) is "a database of face photographs designed for studying the problem of unconstrained face recognition1. It is used to evaluate and improve the performance of facial recognition algorithms in academic, commercial, and government research. According to BiometricUpdate.com3, LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."

    +

    Labeled Faces in The Wild (LFW) is "a database of face photographs designed for studying the problem of unconstrained face recognition 1. It is used to evaluate and improve the performance of facial recognition algorithms in academic, commercial, and government research. According to BiometricUpdate.com 3, LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."

The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. LFW is a subset of Names and Faces and is part of the first facial recognition training dataset created entirely from images appearing on the Internet. The people appearing in LFW are...

The Names and Faces dataset was the first face recognition dataset created entirely from online photos. However, Names and Faces and LFW are not the first face recognition datasets created entirely "in the wild". That title belongs to the UCD dataset. Images obtained "in the wild" means using an image without explicit consent or awareness from the subject or photographer.

    Biometric Trade Routes

    @@ -51,11 +51,11 @@

    Additional Information

    (tweet-sized snippets go here)

      -
    • The LFW dataset is considered the "most popular benchmark for face recognition" 2
    • -
    • The LFW dataset is "the most widely used evaluation set in the field of facial recognition" 3
    • +
    • The LFW dataset is considered the "most popular benchmark for face recognition" 2
    • +
    • The LFW dataset is "the most widely used evaluation set in the field of facial recognition" 3
    • All images in LFW dataset were obtained "in the wild" meaning without any consent from the subject or from the photographer
    • The faces in the LFW dataset were detected using the Viola-Jones haarcascade face detector [^lfw_website] [^lfw-survey]
    • -
    • The LFW dataset is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." 3
    • +
    • The LFW dataset is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." 3
    • All images in the LFW dataset were copied from Yahoo News between 2002 - 2004
    • In 2014, two of the four original authors of the LFW dataset received funding from IARPA and ODNI for their followup paper Labeled Faces in the Wild: Updates and New Reporting Procedures via IARPA contract number 2014-14071600010
    • The dataset includes 2 images of George Tenet, the former Director of Central Intelligence (DCI) for the Central Intelligence Agency whose facial biometrics were eventually used to help train facial recognition software in China and Russia
    • @@ -94,9 +94,9 @@ imageio.imwrite('lfw_montage_960.jpg', montage)

    Supplementary Material

    Text and graphics ©Adam Harvey / megapixels.cc

    -
    -- cgit v1.2.3-70-g09d2 From d8ea57ede73087e0590bc98c7a018f3f185d057a Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Thu, 28 Feb 2019 16:40:21 +0100 Subject: div over leaflet map --- client/map/index.js | 38 ++++++++++++++++++++++++++------------ site/assets/css/applets.css | 22 ++++++++++++++++++++++ 2 files changed, 48 insertions(+), 12 deletions(-) (limited to 'client') diff --git a/client/map/index.js b/client/map/index.js index d38855bf..53d9439d 100644 --- a/client/map/index.js +++ b/client/map/index.js @@ -2,25 +2,25 @@ import L from 'leaflet' import './leaflet.bezier' const arcStyles = { - 'edu': { + edu: { color: 'rgb(245, 246, 150)', fillColor: 'rgb(245, 246, 150)', opacity: 0.5, weight: '1', }, - 'company': { + company: { color: 'rgb(50, 100, 246)', fillColor: 'rgb(50, 100, 246)', opacity: 1.0, weight: '2', }, - 'gov': { + gov: { color: 'rgb(245, 150, 100)', fillColor: 'rgb(245, 150, 150)', opacity: 1.0, weight: '2', }, - 'mil': { + mil: { color: 'rgb(245, 0, 0)', fillColor: 'rgb(245, 0, 0)', opacity: 1.0, @@ -79,17 +79,31 @@ export default function append(el, payload) { } // ....i dont think the sort order does anything?? - citations.sort((a,b) => sortOrder.indexOf(a) - sortOrder.indexOf(b)) - .forEach(citation => { - const address = citation.addresses[0] - const latlng = [address.lat, address.lng].map(n => parseFloat(n)) - if (Number.isNaN(latlng[0]) || Number.isNaN(latlng[1])) return - addMarker(map, latlng, citation.title, address.name) - addArc(map, source, latlng, arcStyles[address.type]) - }) + citations.sort((a, b) => sortOrder.indexOf(a) - sortOrder.indexOf(b)) + .forEach(citation => { + const citationAddress = citation.addresses[0] + const latlng = [citationAddress.lat, citationAddress.lng].map(n => parseFloat(n)) + if (Number.isNaN(latlng[0]) || Number.isNaN(latlng[1])) return + addMarker(map, latlng, citation.title, citationAddress.name) + addArc(map, source, latlng, arcStyles[citationAddress.type]) + }) console.log(paper) const rootMarker = addMarker(map, source, paper.title, paper.address) rootMarker.openPopup() + + // a transparent div to cover the map, so normal scroll events will not be eaten by leaflet + const mapCover = document.createElement("div") + mapCover.classList.add("map_cover") + mapCover.innerHTML = "
    Click here to explore the map
    " + mapCover.querySelector('div').addEventListener('click', () => { + el.removeChild(mapCover) + }) + function stopPropagation(e) { + e.stopPropagation() + } + mapCover.addEventListener('mousewheel', stopPropagation, true) + mapCover.addEventListener('DOMMouseScroll', stopPropagation, true) + el.appendChild(mapCover) } diff --git a/site/assets/css/applets.css b/site/assets/css/applets.css index 729737fe..e84fcfc2 100644 --- a/site/assets/css/applets.css +++ b/site/assets/css/applets.css @@ -140,6 +140,28 @@ .map { margin-bottom: 20px; } +.map_cover { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + cursor: pointer; + background: rgba(0,0,0,0.8); + z-index: 9998; /* site header is 9999 */ + display: flex; + justify-content: center; + align-items: center; + font-size: 36px; + transition: opacity 0.4s cubic-bezier(0,0,1,1); + opacity: 1; +} +.desktop .map_cover { + opacity: 0; +} +.desktop .map_cover:hover { + opacity: 1; +} /* tabulator */ -- cgit v1.2.3-70-g09d2 From ce570d47b47017ee3fb42227e47ec345df08bd18 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Thu, 28 Feb 2019 16:52:14 +0100 Subject: scroll thing --- client/map/index.js | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'client') diff --git a/client/map/index.js b/client/map/index.js index 53d9439d..56a5abed 100644 --- a/client/map/index.js +++ b/client/map/index.js @@ -98,6 +98,7 @@ export default function append(el, payload) { mapCover.classList.add("map_cover") mapCover.innerHTML = "
    Click here to explore the map
    " mapCover.querySelector('div').addEventListener('click', () => { + map.scrollWheelZoom.enable() el.removeChild(mapCover) }) function stopPropagation(e) { @@ -105,5 +106,16 @@ export default function append(el, payload) { } mapCover.addEventListener('mousewheel', stopPropagation, true) mapCover.addEventListener('DOMMouseScroll', stopPropagation, true) + + map.scrollWheelZoom.disable() + map.on('focus', () => { + map.scrollWheelZoom.enable() + el.removeChild(mapCover) + }) + map.on('blur', () => { + map.scrollWheelZoom.disable() + // el.appendChild(mapCover) + }) + el.appendChild(mapCover) } -- cgit v1.2.3-70-g09d2 From 0801726d7a3fd18fb7c4d1ec92e3581699d95ccc Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Thu, 28 Feb 2019 16:58:11 +0100 Subject: fix lfw commerical use csv --- client/tables.js | 1 + .../datasets/lfw/assets/lfw_commercial_use.csv | 62 +++++++++++----------- 2 files changed, 32 insertions(+), 31 deletions(-) (limited to 'client') diff --git a/client/tables.js b/client/tables.js index 2f4214e1..3fadb797 100644 --- a/client/tables.js +++ b/client/tables.js @@ -71,6 +71,7 @@ export default function append(el, payload) { table.setData(data) el.classList.add('loaded') } catch (e) { + console.error("error making json:", payload.url) console.error(e) // console.log(text) diff --git a/site/content/pages/datasets/lfw/assets/lfw_commercial_use.csv b/site/content/pages/datasets/lfw/assets/lfw_commercial_use.csv index 70e2fdeb..a2a4b39c 100644 --- a/site/content/pages/datasets/lfw/assets/lfw_commercial_use.csv +++ b/site/content/pages/datasets/lfw/assets/lfw_commercial_use.csv @@ -1,44 +1,44 @@ "name_display","company_url","example_url","country","description" -"Aratek","http://www.aratek.co/","","China","Biometric sensors for telecom, civil identification, finance, education, POS, and transportation" -"Asaphus","https://asaphus.de/","","Germany","Face recognition for home appliances and autonomous vehicles interaction" -"Aureus","https://cyberextruder.com/biometric-face-recognition-software-use-cases/","","USA","Retail loss prevention solutions, biometric access control, law enforcement and safe city applications, gaming and hospitality applications" +"Aratek","http://www.aratek.co/"," ","China","Biometric sensors for telecom, civil identification, finance, education, POS, and transportation" +"Asaphus","https://asaphus.de/"," ","Germany","Face recognition for home appliances and autonomous vehicles interaction" +"Aureus","https://cyberextruder.com/biometric-face-recognition-software-use-cases/"," ","USA","Retail loss prevention solutions, biometric access control, law enforcement and safe city applications, gaming and hospitality applications" "Baidu","http://research.baidu.com/institute-of-deep-learning/","https://www.newscientist.com/article/2113176-chinese-tourist-town-uses-face-recognition-as-an-entry-pass/","China","Retail payment, transportation, civil identification" -"Betaface","https://www.betaface.com/","","Germany","Web advertising and entertainment, video surveillance, security software, b2b software" -"Yi+AI","http://www.dress-plus.com/solution","","China","Scenario-based advertising, real-time personalized recommendation, character recognition for ads placement" -"CM-CV&AR","http://www.cloudminds.com/","","USA","Human augmented robot intelligence" -"Samtech","http://samtechinfonet.com/products_frs.php","","India","Facilities management, infrastructure support" -"ColorReco","http://www.colorreco.com/","","China","Face login verification, online payment security 
verification, access control system identity authentication and face recognition lock, mobile payment, driver fatigue recognition, virtual makeup" +"Betaface","https://www.betaface.com/"," ","Germany","Web advertising and entertainment, video surveillance, security software, b2b software" +"Yi+AI","http://www.dress-plus.com/solution"," ","China","Scenario-based advertising, real-time personalized recommendation, character recognition for ads placement" +"CM-CV&AR","http://www.cloudminds.com/"," ","USA","Human augmented robot intelligence" +"Samtech","http://samtechinfonet.com/products_frs.php"," ","India","Facilities management, infrastructure support" +"ColorReco","http://www.colorreco.com/"," ","China","Face login verification, online payment security verification, access control system identity authentication and face recognition lock, mobile payment, driver fatigue recognition, virtual makeup" "CloudWalk","www.cloudwalk.cn/","https://qz.com/africa/1287675/china-is-exporting-facial-recognition-to-africa-ensuring-ai-dominance-through-diversity/","China","Security and law enforcement. Being deployed in Zimbabwe" -"Cylltech","http://www.cylltech.com.cn/","","China","Conference management, social assistance, civil access, media orientation, precision marketing, scenic intelligence, tourism management" +"Cylltech","http://www.cylltech.com.cn/"," ","China","Conference management, social assistance, civil access, media orientation, precision marketing, scenic intelligence, tourism management" "Dahua-FaceImage","https://www.dahuasecurity.com/","https://www.dahuasecurity.com/solutions/solutionsbyapplication/23","China","Public security, public access control, finance" -"Daream","http://www.daream.com","","China","Fatigue and distraction detection for autonomous vehicles" -"Deepmark","https://deepmark.ru/","","Russia","Workplace access control" -"Easen Electron","http://www.easen-electron.com","","China","Face recognition door locks for automobiles" -"Ever AI","https://ever.ai/","","USA","Law enforcement, smart cities, surveillance, building security, retail, payments, autonomous vehicles, grocery stores, enhanced marketing" -"Facebook (Face.com)","https://en.wikipedia.org/wiki/Face.com","","USA","Sold to facebook in 2012, and now incorporated into DeepFace" -"Face++","https://www.faceplusplus.com/","","China","Audience engagement analysis, interactive marketing, gaming, photo album processing, security for mobile payments" -"Faceall","http://www.faceall.cn/index.en.html","","China","Internet banking, insurance, automated surveillance, access control, photo refinement, avatar creation" -"Faceter","https://faceter.io","","USA","Workforce attendence reporting and analytics, home video surveillance, retail customer behavior, GPU mining compatible" -"Facevisa","http://www.facevisa.com","","China","Face detection, face key point positioning, living body certification, facial attribute analysis" -"Fujitsu R&D","https://www.fujitsu.com/cn/en/about/local/subsidiaries/frdc/","","Japan","Consumer cameras" -"SenseTime","https://www.sensetime.com/","","Hong Kong","Surveillance, access control, image retrieval, and automatic log-on for personal computer or mobile devices" +"Daream","http://www.daream.com"," ","China","Fatigue and distraction detection for autonomous vehicles" +"Deepmark","https://deepmark.ru/"," ","Russia","Workplace access control" +"Easen Electron","http://www.easen-electron.com"," ","China","Face recognition door locks for automobiles" +"Ever AI","https://ever.ai/"," ","USA","Law 
enforcement, smart cities, surveillance, building security, retail, payments, autonomous vehicles, grocery stores, enhanced marketing" +"Facebook (Face.com)","https://en.wikipedia.org/wiki/Face.com"," ","USA","Sold to facebook in 2012, and now incorporated into DeepFace" +"Face++","https://www.faceplusplus.com/"," ","China","Audience engagement analysis, interactive marketing, gaming, photo album processing, security for mobile payments" +"Faceall","http://www.faceall.cn/index.en.html"," ","China","Internet banking, insurance, automated surveillance, access control, photo refinement, avatar creation" +"Faceter","https://faceter.io"," ","USA","Workforce attendence reporting and analytics, home video surveillance, retail customer behavior, GPU mining compatible" +"Facevisa","http://www.facevisa.com"," ","China","Face detection, face key point positioning, living body certification, facial attribute analysis" +"Fujitsu R&D","https://www.fujitsu.com/cn/en/about/local/subsidiaries/frdc/"," ","Japan","Consumer cameras" +"SenseTime","https://www.sensetime.com/"," ","Hong Kong","Surveillance, access control, image retrieval, and automatic log-on for personal computer or mobile devices" "Turing Robot","http://www.tuling123.com/","http://biz.turingos.cn/home","China","Emotion recognition and analysis for robots and toys, chatbots and digital assistants" "NEC","https://www.nec.com/en/press/201407/global_20140716_01.html","https://arxiv.org/abs/1212.6094","Japan","Law enforcement, event crowd monitoring, used specificallfy by Metropolitan police in UK" -"Aurora","http://auroracs.co.uk/","","UK","Face recognition in airports for security, queue management, x-ray divestment tray linkage" +"Aurora","http://auroracs.co.uk/"," ","UK","Face recognition in airports for security, queue management, x-ray divestment tray linkage" "VisionLabs","https://visionlabs.ai/","https://venturebeat.com/2016/07/07/russian-facial-recognition-startup-visionlabs-raises-5-5m-after-partnering-with-facebook-and-google/","Russia","Video surveillance, banking and finance, customer authentication for retail" -"Yunshitu","http://yunshitu.cn","","China","Security, Internet, broadcasting and other industries" -"Glasssix","http://www.glasssix.com/","","China","School attendance, workforce monitoring" +"Yunshitu","http://yunshitu.cn"," ","China","Security, Internet, broadcasting and other industries" +"Glasssix","http://www.glasssix.com/"," ","China","School attendance, workforce monitoring" "Hisign","http://www.hisign.com.cn/en-us/index.aspx","https://www.bloomberg.com/research/stocks/private/snapshot.asp?privcapId=52323181","China","Criminal investigation information application, and financial big data risk prevention and control products in China" "icarevision","http://www.icarevision.cn","https://www.bloomberg.com/research/stocks/private/snapshot.asp?privcapId=306707800","China","Video surveillance" "IntelliVision","https://www.intelli-vision.com/facial-recognition/","https://www.bloomberg.com/profiles/companies/0080393D:US-intellivision-technologies-corp","USA","Smart homes and buildings, smart security, smart city, smart retail, Smart auto" "Meiya Pico","https://meiyapico.com/","https://www.bloomberg.com/research/stocks/private/snapshot.asp?privcapId=117577345","China","Digital forensics and information security products and services in China" "Orion 
Star","https://www.ainirobot.com/#sixthPage","https://www.prnewswire.com/news-releases/orionstar-wins-challenge-to-recognize-one-million-celebrity-faces-with-artificial-intelligence-300494265.html","China","Face recognition for robots and livestream video censoring" -"Pegatron","http://www.pegatroncorp.com","","China","Workforce attendance" +"Pegatron","http://www.pegatroncorp.com"," ","China","Workforce attendance" "PingAn AI Lab","http://www.pingan.com/","https://www.biometricupdate.com/201703/ping-an-technology-developing-ai-face-recognition-technology-with-record-results","China","Financial services, lending" -"ReadSense","http://www.readsense.ai/","","China","Access control, traffic analysis, crowd analysis, head counting, drone vision, home appliances, community surveillance, custom attention analysis" -"sensingtech","www.sensingtech.com.cn","","China","Workplace entrypoint authentication" -"TCIT","http://www.tcit-us.com/?p=4023","","Taiwan","Retail analytics, workplace access control" -"TerminAI","terminai.com","","China","Smart office, smart city, smart gym, smart medical, smart community" -"Uni-Ubi","http://uni-ubi.com/","","China","Facial recognition for education, business, community, construction" -"Tencent YouTu Lab","http://bestimage.qq.com/","","China","Consumer applications for automatic facial beauty" -"Yuntu WiseSight","http://www.facelab.cn/","","China","Intrusion alarm, access control, access control, electronic patrol, and network alarm. detect suspicious personnel, real-name authentication, and public security, customs, airports, railways and other government security agencies, electronic patrol" \ No newline at end of file +"ReadSense","http://www.readsense.ai/"," ","China","Access control, traffic analysis, crowd analysis, head counting, drone vision, home appliances, community surveillance, custom attention analysis" +"sensingtech","www.sensingtech.com.cn"," ","China","Workplace entrypoint authentication" +"TCIT","http://www.tcit-us.com/?p=4023"," ","Taiwan","Retail analytics, workplace access control" +"TerminAI","terminai.com"," ","China","Smart office, smart city, smart gym, smart medical, smart community" +"Uni-Ubi","http://uni-ubi.com/"," ","China","Facial recognition for education, business, community, construction" +"Tencent YouTu Lab","http://bestimage.qq.com/"," ","China","Consumer applications for automatic facial beauty" +"Yuntu WiseSight","http://www.facelab.cn/"," ","China","Intrusion alarm, access control, access control, electronic patrol, and network alarm. 
detect suspicious personnel, real-name authentication, and public security, customs, airports, railways and other government security agencies, electronic patrol" \ No newline at end of file -- cgit v1.2.3-70-g09d2 From 406d857c61fb128a48281a52899ddf77b68201be Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Thu, 28 Feb 2019 18:32:39 +0100 Subject: threejs splash page on the index --- client/splash/index.js | 14 +-- megapixels/app/site/parser.py | 2 + site/assets/css/css.css | 15 +++- site/content/pages/datasets/index.md | 5 -- site/content/pages/index.md | 26 ++---- site/content/pages/info/index.md | 2 +- site/public/datasets/index.html | 2 +- site/public/index.html | 166 +++-------------------------------- site/public/info/index.html | 2 +- site/templates/home.html | 113 ++++++++---------------- 10 files changed, 74 insertions(+), 273 deletions(-) (limited to 'client') diff --git a/client/splash/index.js b/client/splash/index.js index e247b7f5..a21110f0 100644 --- a/client/splash/index.js +++ b/client/splash/index.js @@ -31,12 +31,14 @@ function build() { function bind() { document.querySelector('.slogan').addEventListener('click', modal.close) - toArray(document.querySelectorAll('.aboutLink')).forEach(el => { - el.addEventListener('click', modal.toggle) - }) - document.querySelector('.about .inner').addEventListener('click', e => e.stopPropagation()) - document.querySelector('.about').addEventListener('click', modal.close) - document.querySelector('.close').addEventListener('click', modal.close) + if (document.querySelector('.about')) { + toArray(document.querySelectorAll('.aboutLink')).forEach(el => { + el.addEventListener('click', modal.toggle) + }) + document.querySelector('.about .inner').addEventListener('click', e => e.stopPropagation()) + document.querySelector('.about').addEventListener('click', modal.close) + document.querySelector('.close').addEventListener('click', modal.close) + } } function animate() { diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py index c17d3b8a..ad4256ad 100644 --- a/megapixels/app/site/parser.py +++ b/megapixels/app/site/parser.py @@ -198,6 +198,8 @@ def format_metadata(section): """ meta = [] for line in section.split('\n'): + if ': ' not in line: + continue key, value = line[2:].split(': ', 1) meta.append("
    {}
    {}
    ".format(key, value)) return "
    {}
    ".format(''.join(meta)) diff --git a/site/assets/css/css.css b/site/assets/css/css.css index 3bd09f23..732386bd 100644 --- a/site/assets/css/css.css +++ b/site/assets/css/css.css @@ -112,13 +112,19 @@ footer { justify-content: space-between; color: #888; font-size: 9pt; - padding: 20px 75px 20px; + padding: 20px 0 20px; font-family: "Roboto", sans-serif; } footer > div { display: flex; flex-direction: row; } +footer > div:nth-child(1) { + padding-left: 75px; +} +footer > div:nth-child(2) { + padding-right: 75px; +} footer a { display: inline-block; color: #888; @@ -237,6 +243,7 @@ p { align-items: flex-start; font-size: 10pt; margin-bottom: 20px; + font-family: 'Roboto', sans-serif; } .meta > div { margin-right: 30px; @@ -540,11 +547,11 @@ section.fullwidth .image { .desktop .dataset-list a:nth-child(3n+2):hover { background-color: rgba(255, 128, 0, 0.2); } .dataset-list a:nth-child(3n+3) { background-color: rgba(255, 255, 0, 0.1); } -.desktop .dataset-list .dataset:nth-child(3n+3):hover { background-color: rgba(255, 255, 0, 0.2); } +.desktop .dataset-list a:nth-child(3n+3):hover { background-color: rgba(255, 255, 0, 0.2); } .dataset-list span { - box-shadow: -3px -3px black, 3px -3px black, -3px 3px black, 3px 3px black; - background-color: black; + box-shadow: -3px -3px #181818, 3px -3px #181818, -3px 3px #181818, 3px 3px #181818; + background-color: #181818; box-decoration-break: clone; } diff --git a/site/content/pages/datasets/index.md b/site/content/pages/datasets/index.md index c408fba4..fa012758 100644 --- a/site/content/pages/datasets/index.md +++ b/site/content/pages/datasets/index.md @@ -13,8 +13,6 @@ sync: false # Facial Recognition Datasets -### Sidebar - + Found: 275 datasets + Created between: 1993-2018 + Smallest dataset: 20 images @@ -22,6 +20,3 @@ sync: false + Highest resolution faces: 450x500 (Unconstrained College Students) + Lowest resolution faces: 16x20 pixels (QMUL SurvFace) - -## End Sidebar - diff --git a/site/content/pages/index.md b/site/content/pages/index.md index d63cf9fa..1cf47aac 100644 --- a/site/content/pages/index.md +++ b/site/content/pages/index.md @@ -1,30 +1,14 @@ ------------ status: published -title: MegaPixels -desc: -slug: home +title: Megapixels +desc: The Darkside of Datasets +slug: analysis published: 2018-12-15 updated: 2018-12-15 authors: Adam Harvey sync: false - ------------- - -## Facial Recognition Datasets - -Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. -### Summary - -+ Found: 275 datasets -+ Created between: 1993-2018 -+ Smallest dataset: 20 images -+ Largest dataset: 10,000,000 images - -+ Highest resolution faces: 450x500 (Unconstrained College Students) -+ Lowest resolution faces: 16x20 pixels (QMUL SurvFace) +------------ -``` -load_file https://megapixels.nyc3.digitaloceanspaces.com/v1/citations/datasets.csv -``` +## diff --git a/site/content/pages/info/index.md b/site/content/pages/info/index.md index 9cbb219e..090783d9 100644 --- a/site/content/pages/info/index.md +++ b/site/content/pages/info/index.md @@ -11,7 +11,7 @@ sync: false ------------ -## +## Face Analysis ``` face_analysis diff --git a/site/public/datasets/index.html b/site/public/datasets/index.html index 17c938ac..7398da17 100644 --- a/site/public/datasets/index.html +++ b/site/public/datasets/index.html @@ -29,7 +29,7 @@

    Facial Recognition Datasets

    -
    +
    Found
    275 datasets
    Created between
    1993-2018
    Smallest dataset
    20 images
    Largest dataset
    10,000,000 images
    Highest resolution faces
    450x500 (Unconstrained College Students)
    Lowest resolution faces
    16x20 pixels (QMUL SurvFace)

    diff --git a/site/public/index.html b/site/public/index.html index 8775f22d..d5a2e59f 100644 --- a/site/public/index.html +++ b/site/public/index.html @@ -3,15 +3,13 @@ MegaPixels - - + + - - - +

    @@ -20,166 +18,22 @@
    MegaPixels
    -
    - -
    -
    -
    -
    -
    -
    -
    - MegaPixels is an art project that explores the dark side of face recognition datasets and the future of computer vision. -
    - - - -
    - Made by Adam Harvey in collaboration with Jules Laplace, and in partnership with Mozilla.
    - Read more about MegaPixels -
    -
    -
    -
    - -
    -

    Face Recognition Datasets

    - - -

    - MegaPixels is an online art project that explores the history of face recognition from the perspective of datasets. MegaPixels aims to unravel the meanings behind the data and expose the darker corners of the biometric industry that have contributed to its growth. -

    -

    - Through a mix of case studies, visualizations, and interactive tools, Megapixels will use face recognition datasets to tell the history of modern biometrics. Many people have contributed to the development of face recignition technology, both wittingly and unwittingly. Not only scientists, but also celebrities and regular internet users have played a part. -

    -

    - Face recognition is a mess of contradictinos. It works, yet it doesn't actually work. It's cheap and accessible, but also expensive and out of control. Face recognition research has achieved headline grabbing superhuman accuracies over 99.9%, yet in practice it's also dangerously inaccurate. -

    -

    - During a trial installation at Sudkreuz station in Berlin in 2018, 20% of the matches were wrong, a number so low that it should not have any connection to law enforcement or justice. And in London, the Metropolitan police had been using face recognition software that mistakenly identified an alarming 98% of people as criminals, which perhaps is a crime itself. -

    -
    - -
    -

    Dataset Portraits

    -

    - We have prepared detailed case studies of some of the more noteworthy datasets, including tools to help you learn what is contained in these datasets, and even whether your own face has been used to train these algorithms. -

    - - -
    - - +
    +
    - - - - - - - - - - + \ No newline at end of file diff --git a/site/public/info/index.html b/site/public/info/index.html index 65510255..0b59e647 100644 --- a/site/public/info/index.html +++ b/site/public/info/index.html @@ -27,7 +27,7 @@
    -

    +

    Face Analysis

    Results are only stored for the duration of the analysis and are deleted when you leave this page.

    diff --git a/site/templates/home.html b/site/templates/home.html index 9756e21f..d5a2e59f 100644 --- a/site/templates/home.html +++ b/site/templates/home.html @@ -1,82 +1,39 @@ -{% extends 'layout.html' %} - -{% block content %} -
    -
    -
    -
    -
    -
    -
    - MegaPixels is an art project that explores the dark side of face recognition datasets and the future of computer vision. -
    - - - -
    - Made by Adam Harvey in collaboration with Jules Laplace, and in partnership with Mozilla.
    - Read more about MegaPixels -
    -
    + + + + MegaPixels + + + + + + + + + + +
    + + +
    MegaPixels
    +
    + +
    +
    +
    - -
    -

    Face Recognition Datasets

    -
    - -
    -

    Dataset Portraits

    -

    - We have prepared detailed case studies of some of the more noteworthy datasets, including tools to help you learn what is contained in these datasets, and even whether your own face has been used to train these algorithms. -

    - -
    - {% for dataset in datasets %} - -
    - {{ dataset.title }} -
    -
    - {% endfor %} +
    + MegaPixels ©2017-19 Adam R. Harvey /  + ahprojects.com
    -
    - -{% endblock %} - -{% block scripts %} - - - - - - - -{% endblock %} + + + + \ No newline at end of file -- cgit v1.2.3-70-g09d2
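The footnote changes to `megapixels/app/site/parser.py` in the "footnote back and forth navigation" commit above are hard to follow in patch form, so here is a minimal, self-contained sketch of the pattern they implement: every occurrence of a `[^key]` marker is replaced with a numbered footnote anchor, and a lettered back-link is collected per occurrence so the rendered footnote can jump back to each reference. The function name `render_footnotes`, the id scheme, and the markup below are illustrative assumptions, not the project's actual output; in the real parser the counter is a module-level `footnote_count` global and the `footnote`, `footnote_shim`, and `backlinks` classes are styled in `site/assets/css/css.css`.

```python
import re


def render_footnotes(content, footnote_lookup):
    """Rewrite [^key] markers as numbered anchors and collect back-links.

    footnote_lookup maps a marker such as '[^lfw_www]' to its footnote number.
    Returns the rewritten content plus, for each marker, the lettered
    back-links (a, b, c, ...) that the footnote entry uses to jump back to
    every place it was referenced. Assumes at most 26 references per note.
    """
    letters = "abcdefghijklmnopqrstuvwxyz"
    backlinks = {}

    for key, number in footnote_lookup.items():
        count = 0
        links = []

        def replace(match, number=number):
            # called once per occurrence of the marker, in document order
            nonlocal count
            count += 1
            links.append('<a href="#ref_{0}_{1}">{2}</a>'.format(
                number, count, letters[count - 1]))
            # an invisible shim to scroll back to, plus the visible superscript
            return ('<span class="footnote_shim" id="ref_{0}_{1}"></span>'
                    '<a class="footnote" href="#fn_{0}">{0}</a>').format(number, count)

        content = re.sub(re.escape(key), replace, content)
        backlinks[key] = "".join(links)

    return content, backlinks


# usage sketch
text = "LFW is widely used[^lfw_www] and widely benchmarked[^lfw_www]."
body, refs = render_footnotes(text, {"[^lfw_www]": 1})
print(body)                 # both markers become numbered footnote anchors
print(refs["[^lfw_www]"])   # two back-links, lettered "a" and "b"
```

Passing a callable to `re.sub` (rather than a fixed replacement string) is what lets each occurrence of the same marker receive its own sequential anchor id, which is the piece this commit adds on top of the earlier single-substitution version.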