From 2a1b884e841efe562e0c84885a404819433b3405 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Wed, 5 Dec 2018 16:19:50 +0100 Subject: styling images --- site/public/index.html | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 site/public/index.html (limited to 'site/public/index.html') diff --git a/site/public/index.html b/site/public/index.html new file mode 100644 index 00000000..ea3dc24c --- /dev/null +++ b/site/public/index.html @@ -0,0 +1,63 @@ + + + + MegaPixels + + + + + + + + + +
+ + +
MegaPixels
+ The Darkside of Datasets +
+ +
+
+ +

MegaPixels is an art project that explores the dark side of face recognition training data and the future of computer vision

+

Made by Adam Harvey in partnership with Mozilla.
+Read more about MegaPixels

+

[Explore Datasets] [Explore Algorithms]

+

Facial Recognition Datasets

+

Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.

+

Summary

+
    +
  • 275 datasets found
  • +
  • Created between the years 1993-2018
  • +
  • Smallest dataset: 20 images
  • +
  • Largest dataset: 10,000,000 images
  • +
  • Highest resolution faces: 450x500 (Unconstrained College Students)
  • +
  • Lowest resolution faces: 16x20 pixels (QMUL SurvFace)
  • +
+
+ +
+ + + + \ No newline at end of file -- cgit v1.2.3-70-g09d2 From 03ed12b471c1e50ae531c46fcbf5afd06ca5432b Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Wed, 5 Dec 2018 18:23:32 +0100 Subject: build --- builder/builder.py | 37 ++++-- builder/parser.py | 46 ++++++- builder/s3.py | 6 + site/assets/css/css.css | 65 +++++++--- site/assets/css/fonts.css | 18 ++- site/assets/js/app/site.js | 7 + site/public/about/credits/index.html | 8 +- site/public/about/disclaimer/index.html | 8 +- site/public/about/index.html | 11 +- site/public/about/press/index.html | 8 +- site/public/about/privacy/index.html | 9 +- site/public/about/style/index.html | 12 +- site/public/about/terms/index.html | 8 +- site/public/datasets/lfw/index.html | 20 +-- site/public/datasets/lfw/what/index.html | 141 +++++++++++++++++++++ site/public/datasets/vgg_faces2/index.html | 20 +-- site/public/index.html | 12 +- .../research/01_from_1_to_100_pixels/index.html | 15 +-- site/public/research/index.html | 50 ++++++++ site/templates/layout.html | 8 +- site/templates/research.html | 12 -- 21 files changed, 391 insertions(+), 130 deletions(-) create mode 100644 site/public/datasets/lfw/what/index.html create mode 100644 site/public/research/index.html (limited to 'site/public/index.html') diff --git a/builder/builder.py b/builder/builder.py index deb9eb68..0e404b88 100644 --- a/builder/builder.py +++ b/builder/builder.py @@ -29,21 +29,25 @@ def build_page(fn, research_posts): output_path = public_path + metadata['url'] output_fn = os.path.join(output_path, "index.html") + is_research = False + if 'research/' in fn: + is_research = True template = env.get_template("research.html") else: template = env.get_template("page.html") - if 'datasets' in fn: - s3_path = "{}/{}/{}{}".format(os.getenv('S3_ENDPOINT'), os.getenv('S3_BUCKET'), s3_datasets_path, metadata['path']) - if 'index.md' in fn: - s3.sync_directory(dirname, s3_datasets_path, metadata) + if 'datasets/' in fn: + s3_dir = s3_datasets_path else: - s3_path = "{}/{}/{}{}".format(os.getenv('S3_ENDPOINT'), os.getenv('S3_BUCKET'), s3_site_path, metadata['path']) - if 'index.md' in fn and metadata['url'] != '/': - s3.sync_directory(dirname, s3_site_path, metadata) + s3_dir = s3_site_path + + s3_path = s3.make_s3_path(s3_dir, metadata['path']) + + if 'index.md' in fn: + s3.sync_directory(dirname, s3_dir, metadata) - content = parser.parse_markdown(sections, s3_path) + content = parser.parse_markdown(sections, s3_path, skip_h1=is_research) html = template.render( metadata=metadata, @@ -58,10 +62,27 @@ def build_page(fn, research_posts): print("______") +def build_research_index(research_posts): + metadata, sections = parser.read_metadata('../site/content/research/index.md') + template = env.get_template("page.html") + s3_path = s3.make_s3_path(s3_site_path, metadata['path']) + content = parser.parse_markdown(sections, s3_path, skip_h1=False) + content += parser.parse_research_index(research_posts) + html = template.render( + metadata=metadata, + content=content, + research_posts=research_posts, + latest_research_post=research_posts[-1], + ) + output_fn = public_path + '/research/index.html' + with open(output_fn, "w") as file: + file.write(html) + def build_site(): research_posts = parser.read_research_post_index() for fn in glob.iglob(os.path.join(content_path, "**/*.md"), recursive=True): build_page(fn, research_posts) + build_research_index(research_posts) if __name__ == '__main__': build_site() diff --git a/builder/parser.py b/builder/parser.py index 529d21fa..da3044a0 100644 
--- a/builder/parser.py +++ b/builder/parser.py @@ -2,6 +2,8 @@ import os import re import glob import mistune + +import s3 from paths import * renderer = mistune.Renderer(escape=False) @@ -12,7 +14,6 @@ def fix_images(lines, s3_path): block = "\n\n".join(lines) for line in block.split("\n"): if "![" in line: - print(line) line = line.replace('![', '') alt_text, tail = line.split('](', 1) url, tail = tail.split(')', 1) @@ -35,13 +36,26 @@ def format_section(lines, s3_path, type=''): return "
" + markdown(lines) + "
" return "" -def parse_markdown(sections, s3_path): +def format_metadata(section): + meta = [] + for line in section.split('\n'): + key, value = line[2:].split(': ', 1) + meta.append("
{}
{}
".format(key, value)) + return "
{}
".format(''.join(meta)) + +def parse_markdown(sections, s3_path, skip_h1=False): groups = [] current_group = [] + seen_metadata = False for section in sections: - if section.startswith('# '): + if skip_h1 and section.startswith('# '): continue - if '![wide:' in section: + elif section.startswith('+ ') and not seen_metadata: + groups.append(format_section(current_group, s3_path)) + groups.append(format_metadata(section)) + current_group = [] + seen_metadata = True + elif '![wide:' in section: groups.append(format_section(current_group, s3_path)) groups.append(format_section([section], s3_path, type='wide')) current_group = [] @@ -55,6 +69,23 @@ def parse_markdown(sections, s3_path): content = "".join(groups) return content +def parse_research_index(research_posts): + content = "
" + for post in research_posts: + s3_path = s3.make_s3_path(s3_site_path, post['path']) + if 'image' in post: + post_image = s3_path + post['image'] + else: + post_image = 'data:image/gif;base64,R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==' + row = "
Research post

{}

{}

".format( + post['path'], + post_image, + post['title'], + post['tagline']) + content += row + content += '
' + return content + def read_metadata(fn): with open(fn, "r") as file: data = file.read() @@ -74,6 +105,8 @@ default_metadata = { 'published': '2018-12-31', 'updated': '2018-12-31', 'authors': 'Adam Harvey', + 'sync': 'true', + 'tagline': '', } def parse_metadata_section(metadata, section): @@ -117,12 +150,15 @@ def parse_metadata(fn, sections): if metadata['status'] == 'published|draft|private': metadata['status'] = 'published' + + metadata['sync'] = metadata['sync'] != 'false' + metadata['author_html'] = '
'.join(metadata['authors'].split(',')) return metadata, valid_sections def read_research_post_index(): posts = [] - for fn in sorted(glob.glob(os.path.join(content_path, 'research/**/index.md'), recursive=True)): + for fn in sorted(glob.glob('../site/content/research/*/index.md')): metadata, valid_sections = read_metadata(fn) if metadata is None or metadata['status'] == 'private' or metadata['status'] == 'draft': continue diff --git a/builder/s3.py b/builder/s3.py index f3dcce48..41ecdf61 100644 --- a/builder/s3.py +++ b/builder/s3.py @@ -18,6 +18,9 @@ def sync_directory(base_fn, s3_path, metadata): for fn in glob.glob(os.path.join(base_fn, 'assets/*')): fns[os.path.basename(fn)] = True + if not metadata['sync']: + return + remote_path = s3_path + metadata['url'] directory = s3_client.list_objects(Bucket=os.getenv('S3_BUCKET'), Prefix=remote_path) @@ -53,3 +56,6 @@ def sync_directory(base_fn, s3_path, metadata): os.getenv('S3_BUCKET'), s3_fn, ExtraArgs={ 'ACL': 'public-read' }) + +def make_s3_path(s3_dir, metadata_path): + return "{}/{}/{}{}".format(os.getenv('S3_ENDPOINT'), os.getenv('S3_BUCKET'), s3_dir, metadata_path) diff --git a/site/assets/css/css.css b/site/assets/css/css.css index 1024ffcd..843809a8 100644 --- a/site/assets/css/css.css +++ b/site/assets/css/css.css @@ -164,16 +164,46 @@ p { .content a:hover { color: #fff; } + +/* top of post metadata */ + +.meta { + display: flex; + flex-direction: row; + justify-content: flex-start; + align-items: flex-start; + font-size: 10pt; + margin-bottom: 20px; +} +.meta > div { + margin-right: 30px; +} +.meta .gray { + font-size: 9pt; + padding-bottom: 4px; +} + +/* misc formatting */ + code { font-family: 'Roboto Mono', monospace; font-size: 9pt; padding: 2px 4px; background: rgba(255,255,255,0.1); } +pre { + margin: 0 0 40px 0; + border: 1px solid #666; + border-radius: 2px; +} pre code { display: block; max-height: 400px; max-width: 640px; + padding: 4px 10px; +} +table { + margin-bottom: 40px; } hr { height: 1px; @@ -181,6 +211,14 @@ hr { border: 0; width: 80px; } +blockquote { + margin-left: 28px; + padding: 0 0 0 10px; + border-left: 2px solid #555; +} + +/* footnotes */ + .footnotes hr { display: none; } @@ -243,29 +281,14 @@ section.wide .image { max-width: 620px; margin: 10px auto 0 auto; } - -blockquote { - margin-left: 28px; - padding: 0 0 0 10px; - border-left: 2px solid #555; -} - -/* top of post metadata */ - -.meta { - display: flex; - flex-direction: row; - justify-content: flex-start; - align-items: flex-start; - font-size: 10pt; - margin-bottom: 20px; +.research_index { + margin-top: 40px; } -.meta > div { - margin-right: 30px; +.research_index a { + text-decoration: none; } -.meta .gray { - font-size: 9pt; - padding-bottom: 4px; +.research_index h1 { + margin-top: 20px; } /* blogpost index */ diff --git a/site/assets/css/fonts.css b/site/assets/css/fonts.css index 2195c70b..8db01fbd 100644 --- a/site/assets/css/fonts.css +++ b/site/assets/css/fonts.css @@ -2,34 +2,40 @@ font-family: 'Roboto'; font-style: normal; font-weight: 300; - src: url("../fonts/Roboto_300.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_300.woff") format("woff"), url("../fonts/Roboto_300.woff2") format("woff2"), url("../fonts/Roboto_300.svg#Roboto") format("svg"), url("../fonts/Roboto_300.ttf") format("truetype"); } + src: url("../fonts/Roboto_300.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_300.woff") format("woff"), url("../fonts/Roboto_300.woff2") format("woff2"), url("../fonts/Roboto_300.svg#Roboto") 
format("svg"), url("../fonts/Roboto_300.ttf") format("truetype"); +} @font-face { font-family: 'Roboto'; font-style: normal; font-weight: 400; - src: url("../fonts/Roboto_400.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_400.woff") format("woff"), url("../fonts/Roboto_400.woff2") format("woff2"), url("../fonts/Roboto_400.svg#Roboto") format("svg"), url("../fonts/Roboto_400.ttf") format("truetype"); } + src: url("../fonts/Roboto_400.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_400.woff") format("woff"), url("../fonts/Roboto_400.woff2") format("woff2"), url("../fonts/Roboto_400.svg#Roboto") format("svg"), url("../fonts/Roboto_400.ttf") format("truetype"); +} @font-face { font-family: 'Roboto'; font-style: normal; font-weight: 500; - src: url("../fonts/Roboto_500.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_500.woff") format("woff"), url("../fonts/Roboto_500.woff2") format("woff2"), url("../fonts/Roboto_500.svg#Roboto") format("svg"), url("../fonts/Roboto_500.ttf") format("truetype"); } + src: url("../fonts/Roboto_500.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_500.woff") format("woff"), url("../fonts/Roboto_500.woff2") format("woff2"), url("../fonts/Roboto_500.svg#Roboto") format("svg"), url("../fonts/Roboto_500.ttf") format("truetype"); +} @font-face { font-family: 'Roboto Mono'; font-style: normal; font-weight: 300; - src: url("../fonts/Roboto_Mono_300.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_Mono_300.woff") format("woff"), url("../fonts/Roboto_Mono_300.woff2") format("woff2"), url("../fonts/Roboto_Mono_300.svg#RobotoMono") format("svg"), url("../fonts/Roboto_Mono_300.ttf") format("truetype"); } + src: url("../fonts/Roboto_Mono_300.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_Mono_300.woff") format("woff"), url("../fonts/Roboto_Mono_300.woff2") format("woff2"), url("../fonts/Roboto_Mono_300.svg#RobotoMono") format("svg"), url("../fonts/Roboto_Mono_300.ttf") format("truetype"); +} @font-face { font-family: 'Roboto Mono'; font-style: normal; font-weight: 400; - src: url("../fonts/Roboto_Mono_400.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_Mono_400.woff") format("woff"), url("../fonts/Roboto_Mono_400.woff2") format("woff2"), url("../fonts/Roboto_Mono_400.svg#RobotoMono") format("svg"), url("../fonts/Roboto_Mono_400.ttf") format("truetype"); } + src: url("../fonts/Roboto_Mono_400.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_Mono_400.woff") format("woff"), url("../fonts/Roboto_Mono_400.woff2") format("woff2"), url("../fonts/Roboto_Mono_400.svg#RobotoMono") format("svg"), url("../fonts/Roboto_Mono_400.ttf") format("truetype"); +} @font-face { font-family: 'Roboto Mono'; font-style: normal; font-weight: 500; - src: local("Roboto-Mono Medium"), local("RobotoMono-Medium"), url("../fonts/Roboto_Mono_500.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_Mono_500.woff") format("woff"), url("../fonts/Roboto_Mono_500.woff2") format("woff2"), url("../fonts/Roboto_Mono_500.svg#RobotoMono") format("svg"), url("../fonts/Roboto_Mono_500.ttf") format("truetype"); } + src: local("Roboto-Mono Medium"), local("RobotoMono-Medium"), url("../fonts/Roboto_Mono_500.eot?#iefix") format("embedded-opentype"), url("../fonts/Roboto_Mono_500.woff") format("woff"), url("../fonts/Roboto_Mono_500.woff2") format("woff2"), url("../fonts/Roboto_Mono_500.svg#RobotoMono") format("svg"), url("../fonts/Roboto_Mono_500.ttf") format("truetype"); +} diff --git 
a/site/assets/js/app/site.js b/site/assets/js/app/site.js index 04c0c495..12bee3ec 100644 --- a/site/assets/js/app/site.js +++ b/site/assets/js/app/site.js @@ -7,6 +7,8 @@ const isDesktop = !isMobile const htmlClassList = document.body.parentNode.classList htmlClassList.add(isDesktop ? 'desktop' : 'mobile') +function toArray(A) { return Array.prototype.slice.apply(A) } + var site = (function(){ var site = {} site.init = function(){ @@ -17,6 +19,11 @@ var site = (function(){ if (paras.length) { paras[0].classList.add('first_paragraph') } + toArray(document.querySelectorAll('header .links a')).forEach(tag => { + if (window.location.href.match(tag.href)) { + tag.classList.add('active') + } + }) } site.init() })() \ No newline at end of file diff --git a/site/public/about/credits/index.html b/site/public/about/credits/index.html index 9fec7e64..f1a28b0e 100644 --- a/site/public/about/credits/index.html +++ b/site/public/about/credits/index.html @@ -18,10 +18,10 @@ The Darkside of Datasets
diff --git a/site/public/about/disclaimer/index.html b/site/public/about/disclaimer/index.html index 553bf084..5df5d656 100644 --- a/site/public/about/disclaimer/index.html +++ b/site/public/about/disclaimer/index.html @@ -18,10 +18,10 @@ The Darkside of Datasets
diff --git a/site/public/about/index.html b/site/public/about/index.html index 363e8fc0..f1a28b0e 100644 --- a/site/public/about/index.html +++ b/site/public/about/index.html @@ -18,15 +18,16 @@ The Darkside of Datasets
-
alt text
alt text
    +

    Credits

    +
    alt text
    alt text
    • MegaPixels by Adam Harvey
    • Made with support from Mozilla
    • Site developed by Jules Laplace
    • diff --git a/site/public/about/press/index.html b/site/public/about/press/index.html index aa6e5e13..e5763036 100644 --- a/site/public/about/press/index.html +++ b/site/public/about/press/index.html @@ -18,10 +18,10 @@ The Darkside of Datasets
      diff --git a/site/public/about/privacy/index.html b/site/public/about/privacy/index.html index d1ec1c77..7ad9564f 100644 --- a/site/public/about/privacy/index.html +++ b/site/public/about/privacy/index.html @@ -18,10 +18,10 @@ The Darkside of Datasets
      @@ -84,7 +84,6 @@ megapixels.cc will take all steps reasonably necessary to ensure that your data

      Disclosure Of Data

      Legal Requirements

      megapixels.cc may disclose your Personal Data in the good faith belief that such action is necessary to:

      -

        • To comply with a legal obligation
        • To protect and defend the rights or property of megapixels.cc
        • diff --git a/site/public/about/style/index.html b/site/public/about/style/index.html index 24e6f5be..eea861ac 100644 --- a/site/public/about/style/index.html +++ b/site/public/about/style/index.html @@ -18,15 +18,17 @@ The Darkside of Datasets
          -
          Alt text here
          Alt text here

          Header 2

          +

          Style Examples

          +
          Alt text here
          Alt text here

          Header 1

          +

          Header 2

          Header 3

          Header 4

          Header 5
          diff --git a/site/public/about/terms/index.html b/site/public/about/terms/index.html index 4b9f4445..db8b9e57 100644 --- a/site/public/about/terms/index.html +++ b/site/public/about/terms/index.html @@ -18,10 +18,10 @@ The Darkside of Datasets
          diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html index a130c24e..76549d25 100644 --- a/site/public/datasets/lfw/index.html +++ b/site/public/datasets/lfw/index.html @@ -18,28 +18,22 @@ The Darkside of Datasets
          -
            -
          • Created 2007
          • -
          • Images 13,233
          • -
          • People 5,749
          • -
          • Created From Yahoo News images
          • -
          • Analyzed and searchable
          • -
          -

          Labeled Faces in The Wild is amongst the most widely used facial recognition training datasets in the world and is the first dataset of its kind to be created entirely from Internet photos. It includes 13,233 images of 5,749 people downloaded from the Internet, otherwise referred to as “The Wild”.

          +

          Labeled Faces in The Wild

          +
          Created
          2007
          Images
          13,233
          People
          5,749
          Created From
          Yahoo News images
          Search available
          Searchable

          Labeled Faces in The Wild is amongst the most widely used facial recognition training datasets in the world and is the first dataset of its kind to be created entirely from Internet photos. It includes 13,233 images of 5,749 people downloaded from the Internet, otherwise referred to as “The Wild”.

          Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.
          Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.

          INTRO

          It began in 2002. Researchers at University of Massachusetts Amherst were developing algorithms for facial recognition and they needed more data. Between 2002-2004 they scraped Yahoo News for images of public figures. Two years later they cleaned up the dataset and repackaged it as Labeled Faces in the Wild (LFW).

Since then the LFW dataset has become one of the most widely used datasets for evaluating face recognition algorithms. The associated research paper “Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments” has been cited 996 times, reaching 45 different countries throughout the world.

The faces come from news stories and are mostly celebrities from the entertainment industry, politicians, and villains. It’s a sampling of current affairs and breaking news that has come to pass. The images, detached from their original context, now serve a new purpose: to train, evaluate, and improve facial recognition.

As the most widely used facial recognition dataset, it can be said that each individual in LFW has, in a small way, contributed to the current state of the art in facial recognition surveillance. John Cusack, Julianne Moore, Barry Bonds, Osama bin Laden, and even Moby are amongst these biometric pillars, exemplar faces that provided the visual dimensions of a new computer vision future.

          -
          From Aaron Eckhart to Zydrunas Ilgauskas. A small sampling of the LFW dataset
          From Aaron Eckhart to Zydrunas Ilgauskas. A small sampling of the LFW dataset

In addition to commercial use as an evaluation tool, all of the faces in the LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.

          +
          From Aaron Eckhart to Zydrunas Ilgauskas. A small sampling of the LFW dataset
          From Aaron Eckhart to Zydrunas Ilgauskas. A small sampling of the LFW dataset

In addition to commercial use as an evaluation tool, all of the faces in the LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.

          Usage

          #!/usr/bin/python
           from matplotlib import plt
          diff --git a/site/public/datasets/lfw/what/index.html b/site/public/datasets/lfw/what/index.html
          new file mode 100644
          index 00000000..52993a79
          --- /dev/null
          +++ b/site/public/datasets/lfw/what/index.html
          @@ -0,0 +1,141 @@
          +
          +
          +
          +  MegaPixels
          +  
          +  
          +  
          +  
          +  
          +  
          +  
          +
          +
          +  
          + + +
          MegaPixels
          + The Darkside of Datasets +
          + +
          +
          + +

          Labeled Faces in The Wild

          +
            +
          • Created 2007 (auto)
          • +
          • Images 13,233 (auto)
          • +
          • People 5,749 (auto)
          • +
          • Created From Yahoo News images (auto)
          • +
          • Analyzed and searchable (auto)
          • +
          +

          Labeled Faces in The Wild is amongst the most widely used facial recognition training datasets in the world and is the first facial recognition dataset [^lfw_names_faces] of its kind to be created entirely from Internet photos. It includes 13,233 images of 5,749 people that appeared on Yahoo News between 2002 - 2004.

          +
          Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.
          Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.

          INTRO

          +

          It began in 2002. Researchers at University of Massachusetts Amherst were developing algorithms for facial recognition and they needed more data. Between 2002-2004 they scraped Yahoo News for images of public figures. Two years later they cleaned up the dataset and repackaged it as Labeled Faces in the Wild (LFW).

          +

Since then the LFW dataset has become one of the most widely used datasets for evaluating face recognition algorithms. The associated research paper “Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments” has been cited 996 times, reaching 45 different countries throughout the world.

          +

The faces come from news stories and are mostly celebrities from the entertainment industry, politicians, and villains. It’s a sampling of current affairs and breaking news that has come to pass. The images, detached from their original context, now serve a new purpose: to train, evaluate, and improve facial recognition.

          +

As the most widely used facial recognition dataset, it can be said that each individual in LFW has, in a small way, contributed to the current state of the art in facial recognition surveillance. John Cusack, Julianne Moore, Barry Bonds, Osama bin Laden, and even Moby are amongst these biometric pillars, exemplar faces that provided the visual dimensions of a new computer vision future.

          +
          From Aaron Eckhart to Zydrunas Ilgauskas. A small sampling of the LFW dataset
          From Aaron Eckhart to Zydrunas Ilgauskas. A small sampling of the LFW dataset

In addition to commercial use as an evaluation tool, all of the faces in the LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.

          +

          Usage

          +
#!/usr/bin/python
+import matplotlib.pyplot as plt
+from sklearn.datasets import fetch_lfw_people
+# fetch_lfw_people downloads the LFW images on first use
+lfw_people = fetch_lfw_people()
+lfw_person = lfw_people.images[0]
+plt.imshow(lfw_person, cmap='gray')
+plt.show()
+
          +

          Commercial Use

          +

          The LFW dataset is used by numerous companies for benchmarking algorithms and in some cases training. According to the benchmarking results page [^lfw_results] provided by the authors, over 2 dozen companies have contributed their benchmark results

          +
          load file: lfw_commercial_use.csv
          +name_display,company_url,example_url,country,description
          +
          + + + + + + + + + + + + + + + + + + + + + + + + +
Company | Country | Industries
Aratek | China | Biometric sensors for telecom, civil identification, finance, education, POS, and transportation
Aratek | China | Biometric sensors for telecom, civil identification, finance, education, POS, and transportation
Aratek | China | Biometric sensors for telecom, civil identification, finance, education, POS, and transportation
          +
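
The table above is populated from the "load file: lfw_commercial_use.csv" directive. The parser changes in this patch do not show how that directive is expanded, so the following is only a hypothetical sketch of how the builder could turn such a CSV into an HTML table; the function name and the chosen columns are assumptions, not code from this repository.

import csv

def render_csv_table(csv_path, columns):
    # hypothetical helper: read the CSV and emit one table row per record
    with open(csv_path, newline='') as f:
        rows = list(csv.DictReader(f))
    head = ''.join('<th>{}</th>'.format(c) for c in columns)
    body = ''.join(
        '<tr>' + ''.join('<td>{}</td>'.format(row[c]) for c in columns) + '</tr>'
        for row in rows)
    return '<table><thead><tr>{}</tr></thead><tbody>{}</tbody></table>'.format(head, body)

# e.g. render_csv_table('lfw_commercial_use.csv', ['name_display', 'country', 'description'])
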

          Add 2-4 screenshots of companies mentioning LFW here

          +
          ReadSense
          ReadSense

          In benchmarking, companies use a dataset to evaluate their algorithms which are typically trained on other data. After training, researchers will use LFW as a benchmark to compare results with other algorithms.
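
To make the benchmarking step described above concrete, here is a minimal sketch of what an LFW-style verification benchmark computes: given pairs of face images labeled same or different, count how often the algorithm's decision matches the label. The embed() function, the pair list, and the similarity threshold are placeholders for whatever model and evaluation split a vendor actually uses; this is not the official LFW protocol code.

import numpy as np

def verification_accuracy(pairs, labels, embed, threshold=0.5):
    # pairs: list of (image_a, image_b); labels: 1 = same person, 0 = different
    scores = []
    for image_a, image_b in pairs:
        a, b = embed(image_a), embed(image_b)
        # cosine similarity between the two face embeddings
        scores.append(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
    predictions = np.array(scores) > threshold
    return float(np.mean(predictions == np.array(labels)))

The accuracy reported on LFW is this kind of same/different decision rate computed over the dataset's published evaluation pairs.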

          +

          For example, Baidu (est. net worth $13B) uses LFW to report results for their "Targeting Ultimate Accuracy: Face Recognition via Deep Embedding". According to the three Baidu researchers who produced the paper:

          +

          LFW has been the most popular evaluation benchmark for face recognition, and played a very important role in facilitating the face recognition society to improve algorithm. 1.

          +
          +

          Citations

          + + + + + + + + + + + + + + + + + + + + + + +
Title | Organization | Country | Type
3D-aided face recognition from videos | University of Lyon | France | edu
A Community Detection Approach to Cleaning Extremely Large Face Database | National University of Defense Technology, China | China | edu
          +

          Conclusion

          +

          The LFW face recognition training and evaluation dataset is a historically important face dataset as it was the first popular dataset to be created entirely from Internet images, paving the way for a global trend towards downloading anyone’s face from the Internet and adding it to a dataset. As will be evident with other datasets, LFW’s approach has now become the norm.

          +

For the 5,000 people in this dataset, their faces are forever a part of facial recognition history. It would be impossible to remove anyone from the dataset because it is so ubiquitous. For the rest of their lives and forever after, these 5,000 people will continue to be used for training facial recognition surveillance.

          +

          Notes

          +

          According to BiometricUpdate.com2, LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."

          +
          +
          +
          1. "Chinese tourist town uses face recognition as an entry pass". New Scientist. November 17, 2016. https://www.newscientist.com/article/2113176-chinese-tourist-town-uses-face-recognition-as-an-entry-pass/

          2. +
          3. "PING AN Tech facial recognition receives high score in latest LFW test results". https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results

          4. +
          +
          +
          + +
          + + + + \ No newline at end of file diff --git a/site/public/datasets/vgg_faces2/index.html b/site/public/datasets/vgg_faces2/index.html index ee353047..95b5f7d7 100644 --- a/site/public/datasets/vgg_faces2/index.html +++ b/site/public/datasets/vgg_faces2/index.html @@ -18,23 +18,17 @@ The Darkside of Datasets
          -
            -
          • Created 2007
          • -
          • Images 13,233
          • -
          • People 5,749
          • -
          • Created From Yahoo News images
          • -
          • Search available Searchable
          • -
          -

          Labeled Faces in The Wild is amongst the most widely used facial recognition training datasets in the world and is the first dataset of its kind to be created entirely from Internet photos. It includes 13,233 images of 5,749 people downloaded from the Internet, otherwise referred to by researchers as “The Wild”.

          -
          Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.
          Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.

          INTRO

          +

          Labeled Faces in The Wild

          +
          Created
          2007
          Images
          13,233
          People
          5,749
          Created From
          Yahoo News images
          Search available
          [Searchable](#)

          Labeled Faces in The Wild is amongst the most widely used facial recognition training datasets in the world and is the first dataset of its kind to be created entirely from Internet photos. It includes 13,233 images of 5,749 people downloaded from the Internet, otherwise referred to by researchers as “The Wild”.

          +

          INTRO

          It began in 2002. Researchers at University of Massachusetts Amherst were developing algorithms for facial recognition and they needed more data. Between 2002-2004 they scraped Yahoo News for images of public figures. Two years later they cleaned up the dataset and repackaged it as Labeled Faces in the Wild (LFW).

Since then the LFW dataset has become one of the most widely used datasets for evaluating face recognition algorithms. The associated research paper “Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments” has been cited 996 times, reaching 45 different countries throughout the world.

The faces come from news stories and are mostly celebrities from the entertainment industry, politicians, and villains. It’s a sampling of current affairs and breaking news that has come to pass. The images, detached from their original context, now serve a new purpose: to train, evaluate, and improve facial recognition.

          diff --git a/site/public/index.html b/site/public/index.html index ea3dc24c..3ce22936 100644 --- a/site/public/index.html +++ b/site/public/index.html @@ -18,23 +18,23 @@ The Darkside of Datasets

          MegaPixels is an art project that explores the dark side of face recognition training data and the future of computer vision

          Made by Adam Harvey in partnership with Mozilla.
          -Read more about MegaPixels

          +Read more [about MegaPixels]

          [Explore Datasets] [Explore Algorithms]

          Facial Recognition Datasets

          Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.

          Summary

            -
          • 275 datasets found
          • +
• 275 datasets found
          • Created between the years 1993-2018
          • Smallest dataset: 20 images
          • Largest dataset: 10,000,000 images
          • diff --git a/site/public/research/01_from_1_to_100_pixels/index.html b/site/public/research/01_from_1_to_100_pixels/index.html index 90f142e9..55e02c6c 100644 --- a/site/public/research/01_from_1_to_100_pixels/index.html +++ b/site/public/research/01_from_1_to_100_pixels/index.html @@ -18,10 +18,10 @@ The Darkside of Datasets
            @@ -74,13 +74,6 @@
          -
          -

          MORE RESEARCH

          -
          - -
          -
          -
          {{ content }} - -
          -

          MORE RESEARCH

          -
          - {% for blogpost in blogposts %} -
          - {{ blogpost.title }} - {{ blogpost.date }} -
          - {% endfor %} -
          -
          {% endblock %} -- cgit v1.2.3-70-g09d2 From 2d950c3fa3b8107f941a80f88127ab45e371d128 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Thu, 6 Dec 2018 19:39:29 +0100 Subject: homepage css --- builder/README.md | 3 + builder/builder.py | 10 +- builder/parser.py | 4 +- site/assets/css/css.css | 93 ++++++++- site/assets/js/app/face.js | 213 +++++++++++++++++++++ site/assets/js/app/site.js | 3 +- site/public/about/credits/index.html | 1 + site/public/about/disclaimer/index.html | 1 + site/public/about/index.html | 1 + site/public/about/press/index.html | 1 + site/public/about/privacy/index.html | 1 + site/public/about/style/index.html | 3 +- site/public/about/terms/index.html | 1 + site/public/datasets/lfw/index.html | 160 +++++++++++++--- site/public/datasets/lfw/what/index.html | 1 + site/public/datasets/vgg_faces2/index.html | 1 + site/public/index.html | 42 ++-- .../research/01_from_1_to_100_pixels/index.html | 1 + site/public/research/index.html | 1 + site/templates/home.html | 32 ++++ site/templates/layout.html | 1 + 21 files changed, 524 insertions(+), 50 deletions(-) create mode 100644 site/assets/js/app/face.js create mode 100644 site/templates/home.html (limited to 'site/public/index.html') diff --git a/builder/README.md b/builder/README.md index 1a6d3a1e..57c024cb 100644 --- a/builder/README.md +++ b/builder/README.md @@ -19,3 +19,6 @@ authors: Adam Harvey, Berit Gilma, Matthew Stender Static assets: `v1/site/about/assets/picture.jpg` Dataset assets: `v1/datasets/lfw/assets/picture.jpg` + +## Markup + diff --git a/builder/builder.py b/builder/builder.py index 0e404b88..620fc710 100644 --- a/builder/builder.py +++ b/builder/builder.py @@ -29,10 +29,12 @@ def build_page(fn, research_posts): output_path = public_path + metadata['url'] output_fn = os.path.join(output_path, "index.html") - is_research = False + skip_h1 = False - if 'research/' in fn: - is_research = True + if metadata['url'] == '/': + template = env.get_template("home.html") + elif 'research/' in fn: + skip_h1 = True template = env.get_template("research.html") else: template = env.get_template("page.html") @@ -47,7 +49,7 @@ def build_page(fn, research_posts): if 'index.md' in fn: s3.sync_directory(dirname, s3_dir, metadata) - content = parser.parse_markdown(sections, s3_path, skip_h1=is_research) + content = parser.parse_markdown(sections, s3_path, skip_h1=skip_h1) html = template.render( metadata=metadata, diff --git a/builder/parser.py b/builder/parser.py index da3044a0..dd3643bf 100644 --- a/builder/parser.py +++ b/builder/parser.py @@ -46,15 +46,13 @@ def format_metadata(section): def parse_markdown(sections, s3_path, skip_h1=False): groups = [] current_group = [] - seen_metadata = False for section in sections: if skip_h1 and section.startswith('# '): continue - elif section.startswith('+ ') and not seen_metadata: + elif section.startswith('+ '): groups.append(format_section(current_group, s3_path)) groups.append(format_metadata(section)) current_group = [] - seen_metadata = True elif '![wide:' in section: groups.append(format_section(current_group, s3_path)) groups.append(format_section([section], s3_path, type='wide')) diff --git a/site/assets/css/css.css b/site/assets/css/css.css index 843809a8..9ac35699 100644 --- a/site/assets/css/css.css +++ b/site/assets/css/css.css @@ -5,9 +5,11 @@ html, body { width: 100%; min-height: 100%; font-family: 'Roboto', sans-serif; - background: #191919; color: #b8b8b8; } +html { + background: #191919; +} /* header */ @@ -119,12 +121,14 @@ h1 { font-size: 24pt; margin: 
75px 0 10px; padding: 0; + transition: color 0.2s cubic-bezier(0,0,1,1); } h2, h3 { margin: 0 0 20px 0; padding: 0; font-size: 11pt; font-weight: 500; + transition: color 0.2s cubic-bezier(0,0,1,1); } th, .gray, h2, h3 { @@ -281,6 +285,9 @@ section.wide .image { max-width: 620px; margin: 10px auto 0 auto; } + +/* blog index */ + .research_index { margin-top: 40px; } @@ -289,10 +296,88 @@ section.wide .image { } .research_index h1 { margin-top: 20px; + text-decoration: underline; +} +.desktop .research_index section:hover h1 { + color: #fff; +} +.research_index section:hover h2 { + color: #ddd; } -/* blogpost index */ +/* home page */ -.blogposts div { - margin-bottom: 5px; +.hero { + position: relative; + width: 100%; + max-width: 1200px; + height: 50vw; + max-height: 70vh; + display: flex; + align-items: center; + margin: 0 auto; +} +#face_container { + pointer-events: none; + position: absolute; + width: 50vw; + height: 50vw; + max-height: 70vh; + top: 0; + right: 0; + z-index: -1; + text-align: center; +} +.currentFace { + position: absolute; + bottom: 50px; + width: 100%; + left: 0; + text-align: center; +} +.intro { + max-width: 640px; + padding: 75px 0 75px 10px; + z-index: 1; +} +.intro .headline { + font-family: 'Roboto Mono', monospace; + font-size: 16pt; +} +.intro .buttons { + margin: 40px 0; +} +.intro button { + font-family: 'Roboto', sans-serif; + padding: 8px 12px; + border-radius: 6px; + border: 1px solid transparent; + cursor: pointer; + font-size: 11pt; + margin-right: 10px; + transition: color 0.1s cubic-bezier(0,0,1,1), background-color 0.1s cubic-bezier(0,0,1,1); +} +.intro button.normal { + background: #191919; + border-color: #444; + color: #ddd; +} +.intro button.important { + background: #444; + border-color: #444; + color: #ddd; +} +.desktop .intro button:hover { + background: #666; + border-color: #666; + color: #fff; +} +.intro .under { + color: #888; +} +.intro .under a { + color: #bbb; +} +.desktop .intro .under a:hover { + color: #fff; } \ No newline at end of file diff --git a/site/assets/js/app/face.js b/site/assets/js/app/face.js new file mode 100644 index 00000000..e8bcd313 --- /dev/null +++ b/site/assets/js/app/face.js @@ -0,0 +1,213 @@ +var face = (function(){ + var container = document.querySelector("#face_container") + var camera, controls, scene, renderer + var mouse = new THREE.Vector2(0.5, 0.5) + var mouseTarget = new THREE.Vector2(0.5, 0.5) + var POINT_SCALE = 1.8 + var FACE_POINT_COUNT = 68 + var SWAP_TIME = 500 + var cubes = [], meshes = [] + var currentFace = document.querySelector('.currentFace') + var faceBuffer = (function () { + var a = new Array(FACE_POINT_COUNT) + for (let i = 0; i < FACE_POINT_COUNT; i++) { + a[i] = new THREE.Vector3() + } + return a + })() + var last_t = 0, start_t = 0 + var colors = [ + 0xff3333, + 0xff8833, + 0xffff33, + 0x338833, + 0x3388ff, + 0x3333ff, + 0x8833ff, + 0xff3388, + 0xffffff, + ] + var swapping = false, swap_count = 0, swapFrom, swapTo, face_names, faces + init() + + function init() { + fetch("/assets/data/3dlm_0_10.json") + .then(req => req.json()) + .then(data => { + face_names = Object.keys(data) + faces = face_names.map(name => recenter(data[name])) + setup() + build(faces[0]) + updateFace(faces[0]) + setCurrentFace(face_names[0]) + swapTo = faces[0] + animate() + }) + } + function setup() { + var w = window.innerWidth / 2 + var h = Math.min(window.innerWidth / 2, window.innerHeight * 0.7) + camera = new THREE.PerspectiveCamera(70, w/h, 1, 10000) + camera.position.x = 0 + camera.position.y = 0 + 
camera.position.z = 250 + + scene = new THREE.Scene() + scene.background = new THREE.Color(0x191919) + + renderer = new THREE.WebGLRenderer({ antialias: true }) + renderer.setPixelRatio(window.devicePixelRatio) + renderer.setSize(w, h) + container.appendChild(renderer.domElement) + document.body.addEventListener('mousemove', onMouseMove) + // renderer.domElement.addEventListener('mousedown', swap) + setInterval(swap, 5000) + } + function build(points) { + var matrix = new THREE.Matrix4() + var quaternion = new THREE.Quaternion() + + for (var i = 0; i < FACE_POINT_COUNT; i++) { + var p = points[i] + var geometry = new THREE.BoxBufferGeometry() + var position = new THREE.Vector3(p[0], p[1], p[2]) + var rotation = new THREE.Euler() + var scale = new THREE.Vector3() + var color = new THREE.Color() + scale.x = scale.y = scale.z = POINT_SCALE + quaternion.setFromEuler(rotation, false) + matrix.compose(position, quaternion, scale) + geometry.applyMatrix(matrix) + material = new THREE.MeshBasicMaterial({ color: color.setHex(0xffffff) }) + cube = new THREE.Mesh(geometry, material) + scene.add(cube) + cubes.push(cube) + } + + meshes = getLineGeometry(points).map((geometry, i) => { + var color = new THREE.Color() + var material = new MeshLineMaterial({ + color: color.setHex(colors[i % colors.length]), + }) + var line = new MeshLine() + line.setGeometry(geometry, _ => 1.5) + var mesh = new THREE.Mesh(line.geometry, material) + mesh.geometry.dynamic = true + scene.add(mesh) + return [line, mesh] + }) + } + function lerpPoints(n, A, B, C) { + for (let i = 0, len = A.length; i < len; i++) { + lerpPoint(n, A[i], B[i], C[i]) + } + } + function lerpPoint(n, A, B, C) { + C.x = lerp(n, A.x, B.x) + C.y = lerp(n, A.y, B.y) + C.z = lerp(n, A.z, B.z) + } + function lerp(n, a, b) { + return (b-a) * n + a + } + function swap(){ + if (swapping) return + start_t = last_t + swapping = true + swap_count = (swap_count + 1) % faces.length + swapFrom = swapTo + swapTo = faces[swap_count] + setCurrentFace(face_names[swap_count]) + } + function setCurrentFace(name) { + name = name.replace('.png', '').split('_').filter(s => !s.match(/\d+/)).join(' ') + currentFace.innerHTML = name + } + function update_swap(t){ + var n = (t - start_t) / SWAP_TIME + if (n > 1) { + swapping = false + n = 1 + } + lerpPoints(n, swapFrom, swapTo, faceBuffer) + updateFace(faceBuffer) + } + function updateFace(points) { + updateCubeGeometry(points) + updateLineGeometry(points) + } + function updateCubeGeometry(points) { + cubes.forEach((cube, i) => { + const p = points[i] + cube.position.set(p.x, p.y, p.z) + }) + } + function updateLineGeometry(points) { + getLineGeometry(points).map((geometry, i) => { + var [line, mesh] = meshes[i] + line.setGeometry(geometry, _ => 1.5) + mesh.geometry.vertices = line.geometry.vertices + mesh.geometry.verticesNeedUpdate = true + }) + } + function getLineGeometry(points) { + return [ + points.slice(0, 17), + points.slice(17, 22), + points.slice(22, 27), + points.slice(27, 31), + points.slice(31, 36), + points.slice(36, 42), + points.slice(42, 48), + points.slice(48) + ].map((a, i) => { + var geometry = new THREE.Geometry() + a.forEach(p => geometry.vertices.push(p)) + if (i > 4) { + geometry.vertices.push(a[0]) + } + return geometry + }) + } + function getBounds(obj) { + return obj.reduce((a, p) => { + return [ + Math.min(a[0], p[0]), + Math.max(a[1], p[0]), + Math.min(a[2], p[1]), + Math.max(a[3], p[1]), + Math.min(a[4], p[2]), + Math.max(a[5], p[2]), + ] + }, [Infinity, -Infinity, Infinity, -Infinity, Infinity, 
-Infinity]) + } + function recenter(obj) { + const bounds = getBounds(obj) + const x_width = (bounds[1] - bounds[0]) / 2 + const y_width = (bounds[3] - bounds[2]) / -3 + const z_width = (bounds[5] - bounds[4]) / 2 + return obj.map(p => { + p[0] = p[0] - bounds[0] - x_width + p[1] = -p[1] + bounds[1] + y_width + p[2] = p[2] - bounds[2] + z_width + return new THREE.Vector3(p[0], p[1], p[2]) + }) + } + // + function onMouseMove(e) { + mouse.x = e.clientX / window.innerWidth + mouse.y = e.clientY / window.innerHeight + } + function animate(t) { + requestAnimationFrame(animate) + if (swapping) update_swap(t) + renderer.render(scene, camera) + scene.rotation.y += 0.01 * Math.PI + mouseTarget.x += (mouse.x - mouseTarget.x) * 0.1 + mouseTarget.y += (mouse.y - mouseTarget.y) * 0.1 + scene.rotation.x = (mouseTarget.y - 0.5) * Math.PI / 2 + // scene.rotation.y = (mouseTarget.x - 0.5) * Math.PI + scene.rotation.y += 0.01 + last_t = t + } +})() diff --git a/site/assets/js/app/site.js b/site/assets/js/app/site.js index 12bee3ec..eb6886c2 100644 --- a/site/assets/js/app/site.js +++ b/site/assets/js/app/site.js @@ -7,7 +7,8 @@ const isDesktop = !isMobile const htmlClassList = document.body.parentNode.classList htmlClassList.add(isDesktop ? 'desktop' : 'mobile') -function toArray(A) { return Array.prototype.slice.apply(A) } +function toArray(a) { return Array.prototype.slice.apply(a) } +function choice(a) { return a[Math.floor(Math.random()*a.length)]} var site = (function(){ var site = {} diff --git a/site/public/about/credits/index.html b/site/public/about/credits/index.html index f1a28b0e..65bc7ac4 100644 --- a/site/public/about/credits/index.html +++ b/site/public/about/credits/index.html @@ -52,5 +52,6 @@
          + \ No newline at end of file diff --git a/site/public/about/disclaimer/index.html b/site/public/about/disclaimer/index.html index 5df5d656..b0215bde 100644 --- a/site/public/about/disclaimer/index.html +++ b/site/public/about/disclaimer/index.html @@ -52,5 +52,6 @@
          + \ No newline at end of file diff --git a/site/public/about/index.html b/site/public/about/index.html index f1a28b0e..65bc7ac4 100644 --- a/site/public/about/index.html +++ b/site/public/about/index.html @@ -52,5 +52,6 @@
          + \ No newline at end of file diff --git a/site/public/about/press/index.html b/site/public/about/press/index.html index e5763036..09c89165 100644 --- a/site/public/about/press/index.html +++ b/site/public/about/press/index.html @@ -50,5 +50,6 @@
          + \ No newline at end of file diff --git a/site/public/about/privacy/index.html b/site/public/about/privacy/index.html index 7ad9564f..5675f072 100644 --- a/site/public/about/privacy/index.html +++ b/site/public/about/privacy/index.html @@ -129,5 +129,6 @@ You are advised to review this Privacy Policy periodically for any changes. Chan
      + \ No newline at end of file diff --git a/site/public/about/style/index.html b/site/public/about/style/index.html index eea861ac..f2c0d4b8 100644 --- a/site/public/about/style/index.html +++ b/site/public/about/style/index.html @@ -27,7 +27,7 @@

      Style Examples

      -
      Alt text here
      Alt text here

      Header 1

      +
      Alt text here
      Alt text here
      Date
      17-Jan-2019
      Numbers
      17
      Identities
      12,139
      But also
      This is a test of the stylesheet

      Header 1

      Header 2

      Header 3

      Header 4

      @@ -85,5 +85,6 @@ But let's throw in a <b>tag</b>.
      + \ No newline at end of file diff --git a/site/public/about/terms/index.html b/site/public/about/terms/index.html index db8b9e57..078c339f 100644 --- a/site/public/about/terms/index.html +++ b/site/public/about/terms/index.html @@ -64,5 +64,6 @@
      + \ No newline at end of file diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html index 76549d25..39052b44 100644 --- a/site/public/datasets/lfw/index.html +++ b/site/public/datasets/lfw/index.html @@ -27,23 +27,22 @@

      Labeled Faces in The Wild

      -
      Created
      2007
      Images
      13,233
      People
      5,749
      Created From
      Yahoo News images
      Search available
      Searchable

      Labeled Faces in The Wild is amongst the most widely used facial recognition training datasets in the world and is the first dataset of its kind to be created entirely from Internet photos. It includes 13,233 images of 5,749 people downloaded from the Internet, otherwise referred to as “The Wild”.

      -
      Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.
      Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.

      INTRO

      +
      Created
      2007
      Images
      13,233
      People
      5,749
      Created From
      Yahoo News images
      Search available
      Searchable

      Labeled Faces in The Wild (LFW) is amongst the most widely used facial recognition training datasets in the world and is the first of its kind to be created entirely from images that were posted online. The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. Use the tools below to check if you were included in this dataset or scroll down to read the analysis.

      +

      {INSERT IMAGE SEARCH MODULE}

      +

      {INSERT TEXT SEARCH MODULE}

      +
      Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.
      Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.

      INTRO

      It began in 2002. Researchers at University of Massachusetts Amherst were developing algorithms for facial recognition and they needed more data. Between 2002-2004 they scraped Yahoo News for images of public figures. Two years later they cleaned up the dataset and repackaged it as Labeled Faces in the Wild (LFW).

Since then the LFW dataset has become one of the most widely used datasets for evaluating face recognition algorithms. The associated research paper “Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments” has been cited 996 times, reaching 45 different countries throughout the world.

The faces come from news stories and are mostly celebrities from the entertainment industry, politicians, and villains. It’s a sampling of current affairs and breaking news that has come to pass. The images, detached from their original context, now serve a new purpose: to train, evaluate, and improve facial recognition.

As the most widely used facial recognition dataset, it can be said that each individual in LFW has, in a small way, contributed to the current state of the art in facial recognition surveillance. John Cusack, Julianne Moore, Barry Bonds, Osama bin Laden, and even Moby are amongst these biometric pillars, exemplar faces that provided the visual dimensions of a new computer vision future.

      -
      From Aaron Eckhart to Zydrunas Ilgauskas. A small sampling of the LFW dataset
      From Aaron Eckhart to Zydrunas Ilgauskas. A small sampling of the LFW dataset

In addition to commercial use as an evaluation tool, all of the faces in the LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.

      -

      Usage

      -
      #!/usr/bin/python
      -from matplotlib import plt
      -from sklearn.datasets import fetch_lfw_people
      -lfw_people = fetch_lfw_people()
      -lfw_person = lfw_people[0]
      -plt.imshow(lfw_person)
      -
      +
      The entire LFW dataset cropped to facial regions
      The entire LFW dataset cropped to facial regions

In addition to commercial use as an evaluation tool, all of the faces in the LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.

      +

      Facts

      +

      The person with the most images is: +The person with the least images is:
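
As a sketch only, and assuming scikit-learn's packaged copy of LFW rather than the original archive, these two facts could be computed like this:

from collections import Counter
from sklearn.datasets import fetch_lfw_people

lfw = fetch_lfw_people()  # downloads the LFW images on first use
counts = Counter(lfw.target_names[t] for t in lfw.target)
print('Most images:', counts.most_common(1)[0])
print('Fewest images:', counts.most_common()[-1])
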

      Commercial Use

      -

      The LFW dataset is used by numerous companies for benchmarking algorithms and in some cases training. According to the benchmarking results page [^lfw_results] provided by the authors, over 2 dozen companies have contributed their benchmark results

      +

      The LFW dataset is used by numerous companies for benchmarking algorithms and in some cases training. According to the benchmarking results page [^lfw_results] provided by the authors, over 2 dozen companies have contributed their benchmark results.

      +

      According to BiometricUpdate.com [^lfw_pingan], LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."

      +

According to researchers at the Baidu Research – Institute of Deep Learning, "LFW has been the most popular evaluation benchmark for face recognition, and played a very important role in facilitating the face recognition society to improve algorithm. [^lfw_baidu]."

      load file: lfw_commercial_use.csv
       name_display,company_url,example_url,country,description
       
      @@ -73,11 +72,24 @@ name_display,company_url,example_url,country,description

      Add 2-4 screenshots of companies mentioning LFW here

      -
      ReadSense
      ReadSense

      In benchmarking, companies use a dataset to evaluate their algorithms which are typically trained on other data. After training, researchers will use LFW as a benchmark to compare results with other algorithms.

      +
       "PING AN Tech facial recognition receives high score in latest LFW test results"
      "PING AN Tech facial recognition receives high score in latest LFW test results"
      +
       "Face Recognition Performance in LFW benchmark"
      "Face Recognition Performance in LFW benchmark"
      +
       "The 1st place in face verification challenge, LFW"
      "The 1st place in face verification challenge, LFW"

      In benchmarking, companies use a dataset to evaluate their algorithms which are typically trained on other data. After training, researchers will use LFW as a benchmark to compare results with other algorithms.

      For example, Baidu (est. net worth $13B) uses LFW to report results for their "Targeting Ultimate Accuracy: Face Recognition via Deep Embedding". According to the three Baidu researchers who produced the paper:

      -

      LFW has been the most popular evaluation benchmark for face recognition, and played a very important role in facilitating the face recognition society to improve algorithm. 1.

      -

      Citations

      +

      Overall, LFW has at least 456 citations from 123 countries. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos.

      +

      Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos.

      +
      Distribution of citations per year per country for the top 5 countries with citations for the LFW Dataset
      Distribution of citations per year per country for the top 5 countries with citations for the LFW Dataset
      Geographic distributions of citations for the LFW Dataset
      Geographic distributions of citations for the LFW Dataset

      Conclusion

      +

      The LFW face recognition training and evaluation dataset is a historically important face dataset as it was the first popular dataset to be created entirely from Internet images, paving the way for a global trend towards downloading anyone’s face from the Internet and adding it to a dataset. As will be evident with other datasets, LFW’s approach has now become the norm.

      +

For all of the 5,000 people in this dataset, their faces are forever a part of facial recognition history. It would be impossible to remove anyone from the dataset because it is so ubiquitous. For the rest of their lives and forever after, these 5,000 people will continue to be used for training facial recognition surveillance.

      +

      Right to Removal

      +

If you are affected by disclosure of your identity in this dataset, please do contact the authors; many state that they are willing to remove images upon request. The authors of LFW can be reached via the emails posted in their paper:

      +

      You can use the following message to request removal from the dataset:

      +

      Dear [researcher name],

      +

I am writing to you about the "LFW Dataset". Recently I have discovered that your dataset includes my identity and I no longer wish to be included in your dataset.

      +

MegaPixels is an educational art project developed for academic purposes. In no way does this project aim to vilify the researchers who produced the datasets. The aim of this project is to encourage discourse around ethics and consent in artificial intelligence by providing information about these datasets that is otherwise difficult to obtain or inaccessible to other researchers.

      +

      Supplementary Data

      +

      Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos.

@@ -99,18 +111,119 @@ name_display,company_url,example_url,country,description
Title | | China | edu
3D-aided face recognition from videos | University of Lyon | France | edu
3D-aided face recognition from videos | University of Lyon | France | edu
3D-aided face recognition from videos | University of Lyon | France | edu
3D-aided face recognition from videos | University of Lyon | France | edu
3D-aided face recognition from videos | University of Lyon | France | edu
3D-aided face recognition from videos | University of Lyon | France | edu
3D-aided face recognition from videos | University of Lyon | France | edu
3D-aided face recognition from videos | University of Lyon | France | edu
3D-aided face recognition from videos | University of Lyon | France | edu
3D-aided face recognition from videos | University of Lyon | France | edu
3D-aided face recognition from videos | University of Lyon | France | edu
3D-aided face recognition from videos | University of Lyon | France | edu
3D-aided face recognition from videos | University of Lyon | France | edu
      -

      Conclusion

      -

      The LFW face recognition training and evaluation dataset is a historically important face dataset as it was the first popular dataset to be created entirely from Internet images, paving the way for a global trend towards downloading anyone’s face from the Internet and adding it to a dataset. As will be evident with other datasets, LFW’s approach has now become the norm.

      -

For all of the 5,000 people in this dataset, their faces are forever a part of facial recognition history. It would be impossible to remove anyone from the dataset because it is so ubiquitous. For the rest of their lives and forever after, these 5,000 people will continue to be used for training facial recognition surveillance.

      -

      Notes

      -

      According to BiometricUpdate.com2, LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."

      +

      Code

      +
      #!/usr/bin/python
      +
      +import numpy as np
      +from sklearn.datasets import fetch_lfw_people
      +import imageio
      +import imutils
      +
      +# download LFW dataset (first run takes a while)
      +lfw_people = fetch_lfw_people(min_faces_per_person=1, resize=1, color=True, funneled=False)
      +
      +# introspect dataset
      +n_samples, h, w, c = lfw_people.images.shape
      +print('{:,} images at {}x{}'.format(n_samples, w, h))
      +cols, rows = (176, 76)
      +n_ims = cols * rows
      +
      +# build montages
      +im_scale = 0.5
+# scikit-learn loads LFW images as floats scaled to [0, 1]; convert to uint8 for imutils/imageio
+ims = [(im * 255).astype(np.uint8) for im in lfw_people.images[:n_ims]]
+montages = imutils.build_montages(ims, (int(w*im_scale), int(h*im_scale)), (cols, rows))
      +montage = montages[0]
      +
      +# save full montage image
      +imageio.imwrite('lfw_montage_full.png', montage)
      +
      +# make a smaller version
      +montage_960 = imutils.resize(montage, width=960)
      +imageio.imwrite('lfw_montage_960.jpg', montage_960)
      +

      -
      1. "Chinese tourist town uses face recognition as an entry pass". New Scientist. November 17, 2016. https://www.newscientist.com/article/2113176-chinese-tourist-town-uses-face-recognition-as-an-entry-pass/

      2. -
      3. "PING AN Tech facial recognition receives high score in latest LFW test results". https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results

      4. -
      +
        @@ -130,5 +243,6 @@ name_display,company_url,example_url,country,description
        + \ No newline at end of file diff --git a/site/public/datasets/lfw/what/index.html b/site/public/datasets/lfw/what/index.html index 52993a79..ceafb35a 100644 --- a/site/public/datasets/lfw/what/index.html +++ b/site/public/datasets/lfw/what/index.html @@ -137,5 +137,6 @@ name_display,company_url,example_url,country,description
    + \ No newline at end of file diff --git a/site/public/datasets/vgg_faces2/index.html b/site/public/datasets/vgg_faces2/index.html index 95b5f7d7..3f778f71 100644 --- a/site/public/datasets/vgg_faces2/index.html +++ b/site/public/datasets/vgg_faces2/index.html @@ -58,5 +58,6 @@
    + \ No newline at end of file diff --git a/site/public/index.html b/site/public/index.html index 3ce22936..51006b59 100644 --- a/site/public/index.html +++ b/site/public/index.html @@ -26,22 +26,31 @@
    -

    MegaPixels is an art project that explores the dark side of face recognition training data and the future of computer vision

    -

    Made by Adam Harvey in partnership with Mozilla.
    -Read more [about MegaPixels]

    -

    [Explore Datasets] [Explore Algorithms]

    -

    Facial Recognition Datasets

    +
    +
    +
    +
    +
    +
    + MegaPixels is an art project that explores the dark side of face recognition and the future of computer vision. +
    + + + +
    + Made by Adam Harvey in partnership with Mozilla.
    + Read more about MegaPixels +
    +
    +
    + +

    Facial Recognition Datasets

    Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.

    Summary

    -
      -
• 275 datasets found
    • -
    • Created between the years 1993-2018
    • -
    • Smallest dataset: 20 images
    • -
    • Largest dataset: 10,000,000 images
    • -
    • Highest resolution faces: 450x500 (Unconstrained College Students)
    • -
    • Lowest resolution faces: 16x20 pixels (QMUL SurvFace)
    • -
    -
    +
    Found
    275 datasets
    Created between
    1993-2018
    Smallest dataset
    20 images
    Largest dataset
    10,000,000 images
    Highest resolution faces
    450x500 (Unconstrained College Students)
    Lowest resolution faces
    16x20 pixels (QMUL SurvFace)
    +
    @@ -59,5 +68,10 @@ Read more [about MegaPixels]

    + + + + + \ No newline at end of file diff --git a/site/public/research/01_from_1_to_100_pixels/index.html b/site/public/research/01_from_1_to_100_pixels/index.html index 55e02c6c..b4c85d00 100644 --- a/site/public/research/01_from_1_to_100_pixels/index.html +++ b/site/public/research/01_from_1_to_100_pixels/index.html @@ -90,5 +90,6 @@ + \ No newline at end of file diff --git a/site/public/research/index.html b/site/public/research/index.html index 1f61dadf..cf9546e1 100644 --- a/site/public/research/index.html +++ b/site/public/research/index.html @@ -46,5 +46,6 @@ + \ No newline at end of file diff --git a/site/templates/home.html b/site/templates/home.html new file mode 100644 index 00000000..436c1ddf --- /dev/null +++ b/site/templates/home.html @@ -0,0 +1,32 @@ +{% extends 'layout.html' %} + +{% block content %} +
    +
    +
    +
    +
    +
    + MegaPixels is an art project that explores the dark side of face recognition and the future of computer vision. +
    + + + +
    + Made by Adam Harvey in partnership with Mozilla.
    + Read more about MegaPixels +
    +
    +
    + + {{ content }} + +{% endblock %} + +{% block scripts %} + + + +{% endblock %} diff --git a/site/templates/layout.html b/site/templates/layout.html index 7558163e..605f9788 100644 --- a/site/templates/layout.html +++ b/site/templates/layout.html @@ -42,5 +42,6 @@ +{% block scripts %}{% endblock %} \ No newline at end of file -- cgit v1.2.3-70-g09d2 From 485cf0e4665c660d4e5e1fba00a95bc8036809c6 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Sat, 15 Dec 2018 16:40:34 +0100 Subject: db stuff --- faiss/__init__.py | 0 faiss/server.py | 68 ----------------- megapixels/app/server/api.py | 97 +++++++++---------------- site/public/datasets/lfw/index.html | 67 +++++++++++++---- site/public/datasets/vgg_face2/index.html | 84 +++++++++++++++++++++ site/public/index.html | 1 + site/public/research/00_introduction/index.html | 86 ++++++++++++++++++++++ site/public/research/index.html | 2 +- 8 files changed, 259 insertions(+), 146 deletions(-) delete mode 100644 faiss/__init__.py delete mode 100644 faiss/server.py create mode 100644 site/public/datasets/vgg_face2/index.html create mode 100644 site/public/research/00_introduction/index.html (limited to 'site/public/index.html') diff --git a/faiss/__init__.py b/faiss/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/faiss/server.py b/faiss/server.py deleted file mode 100644 index a8c660fa..00000000 --- a/faiss/server.py +++ /dev/null @@ -1,68 +0,0 @@ -#!python - -import os -import sys -import json -import time -import argparse -import cv2 as cv -import numpy as np -from datetime import datetime -from flask import Flask, request, render_template, jsonify -from PIL import Image # todo: try to remove PIL dependency -import re - -sanitize_re = re.compile('[\W]+') -valid_exts = ['.gif', '.jpg', '.jpeg', '.png'] - -from dotenv import load_dotenv -load_dotenv() - -from feature_extractor import FeatureExtractor - -DEFAULT_LIMIT = 50 - -app = Flask(__name__, static_url_path="/search/static", static_folder="static") - -# static api routes - this routing is actually handled in the JS -@app.route('/', methods=['GET']) -def index(): - return app.send_static_file('metadata.html') - -# search using an uploaded file -@app.route('/search/api/upload', methods=['POST']) -def upload(): - file = request.files['query_img'] - fn = file.filename - if fn.endswith('blob'): - fn = 'filename.jpg' - - basename, ext = os.path.splitext(fn) - print("got {}, type {}".format(basename, ext)) - if ext.lower() not in valid_exts: - return jsonify({ 'error': 'not an image' }) - - uploaded_fn = datetime.now().isoformat() + "_" + basename - uploaded_fn = sanitize_re.sub('', uploaded_fn) - uploaded_img_path = "static/uploaded/" + uploaded_fn + ext - uploaded_img_path = uploaded_img_path.lower() - print('query: {}'.format(uploaded_img_path)) - - img = Image.open(file.stream).convert('RGB') - # img.save(uploaded_img_path) - # vec = db.load_feature_vector_from_file(uploaded_img_path) - vec = fe.extract(img) - # print(vec.shape) - - results = db.search(vec, limit=limit) - query = { - 'timing': time.time() - start, - } - print(results) - return jsonify({ - 'results': results, - }) - -if __name__=="__main__": - app.run("0.0.0.0", debug=False) - diff --git a/megapixels/app/server/api.py b/megapixels/app/server/api.py index 428c53b1..c5e27dd2 100644 --- a/megapixels/app/server/api.py +++ b/megapixels/app/server/api.py @@ -1,30 +1,13 @@ -from flask import Blueprint, jsonify +import os +import re +import time +from flask import Blueprint, request, jsonify +from PIL import 
Image # todo: try to remove PIL dependency from app.models.sql_factory import list_datasets, get_dataset, get_table -# from jinja2 import TemplateNotFound - -# import os -# import sys -# import json -# import time -# import argparse -# import cv2 as cv -# import numpy as np -# from datetime import datetime -# from flask import Flask, request, render_template, jsonify -# from PIL import Image # todo: try to remove PIL dependency -# import re - -# sanitize_re = re.compile('[\W]+') -# valid_exts = ['.gif', '.jpg', '.jpeg', '.png'] - -# from dotenv import load_dotenv -# load_dotenv() - -# from feature_extractor import FeatureExtractor - -# DEFAULT_LIMIT = 50 +sanitize_re = re.compile('[\W]+') +valid_exts = ['.gif', '.jpg', '.jpeg', '.png'] api = Blueprint('api', __name__) @@ -40,40 +23,32 @@ def show(name): else: return jsonify({ 'status': 404 }) -@api.route('/dataset//test', methods=['POST']) -def test(name): - print('hiiiiii') - return jsonify({ 'test': 'OK', 'dataset': name }) - -# @router.route('//face', methods=['POST']) -# def upload(name): -# file = request.files['query_img'] -# fn = file.filename -# if fn.endswith('blob'): -# fn = 'filename.jpg' - -# basename, ext = os.path.splitext(fn) -# print("got {}, type {}".format(basename, ext)) -# if ext.lower() not in valid_exts: -# return jsonify({ 'error': 'not an image' }) - -# uploaded_fn = datetime.now().isoformat() + "_" + basename -# uploaded_fn = sanitize_re.sub('', uploaded_fn) -# uploaded_img_path = "static/uploaded/" + uploaded_fn + ext -# uploaded_img_path = uploaded_img_path.lower() -# print('query: {}'.format(uploaded_img_path)) - -# img = Image.open(file.stream).convert('RGB') -# # img.save(uploaded_img_path) -# # vec = db.load_feature_vector_from_file(uploaded_img_path) -# vec = fe.extract(img) -# # print(vec.shape) - -# results = db.search(vec, limit=limit) -# query = { -# 'timing': time.time() - start, -# } -# print(results) -# return jsonify({ -# 'results': results, -# }) +@api.route('/dataset//face', methods=['POST']) +def upload(name): + file = request.files['query_img'] + fn = file.filename + if fn.endswith('blob'): + fn = 'filename.jpg' + + basename, ext = os.path.splitext(fn) + print("got {}, type {}".format(basename, ext)) + if ext.lower() not in valid_exts: + return jsonify({ 'error': 'not an image' }) + + img = Image.open(file.stream).convert('RGB') + + # vec = db.load_feature_vector_from_file(uploaded_img_path) + # vec = fe.extract(img) + # print(vec.shape) + # results = db.search(vec, limit=limit) + + query = { + 'timing': time.time() - start, + } + results = [] + + print(results) + return jsonify({ + 'query': query, + 'results': results, + }) diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html index 39052b44..e080229f 100644 --- a/site/public/datasets/lfw/index.html +++ b/site/public/datasets/lfw/index.html @@ -26,23 +26,50 @@
    -

    Labeled Faces in The Wild

    -
    Created
    2007
    Images
    13,233
    People
    5,749
    Created From
    Yahoo News images
    Search available
    Searchable

    Labeled Faces in The Wild (LFW) is amongst the most widely used facial recognition training datasets in the world and is the first of its kind to be created entirely from images that were posted online. The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. Use the tools below to check if you were included in this dataset or scroll down to read the analysis.

    +

    Labeled Faces in the Wild

    +
    Created
    2007
    Images
    13,233
    People
    5,749
    Created From
    Yahoo News images
    Search available
    Searchable

    Labeled Faces in The Wild (LFW) is amongst the most widely used facial recognition training datasets in the world and is the first of its kind to be created entirely from images posted online. The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. Use the tools below to check if you were included in this dataset or scroll down to read the analysis.

    {INSERT IMAGE SEARCH MODULE}

    {INSERT TEXT SEARCH MODULE}

    -
    Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.
    Eight out of 5,749 people in the Labeled Faces in the Wild dataset. The face recognition training dataset is created entirely from photos downloaded from the Internet.

    INTRO

    -

    It began in 2002. Researchers at University of Massachusetts Amherst were developing algorithms for facial recognition and they needed more data. Between 2002-2004 they scraped Yahoo News for images of public figures. Two years later they cleaned up the dataset and repackaged it as Labeled Faces in the Wild (LFW).

    -

    Since then the LFW dataset has become one of the most widely used datasets used for evaluating face recognition algorithms. The associated research paper “Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments” has been cited 996 times reaching 45 different countries throughout the world.

    -

The faces come from news stories and are mostly celebrities from the entertainment industry, politicians, and villains. It’s a sampling of current affairs and breaking news that has come to pass. The images, detached from their original context, now serve a new purpose: to train, evaluate, and improve facial recognition.

    -

As the most widely used facial recognition dataset, it can be said that each individual in LFW has, in a small way, contributed to the current state of the art in facial recognition surveillance. John Cusack, Julianne Moore, Barry Bonds, Osama bin Laden, and even Moby are amongst these biometric pillars, exemplar faces that provided the visual dimensions of a new computer vision future.

    -
    The entire LFW dataset cropped to facial regions
    The entire LFW dataset cropped to facial regions

In addition to commercial use as an evaluation tool, all of the faces in the LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.

    -

    Facts

    -

    The person with the most images is: -The person with the least images is:

    -

    Commercial Use

    +
    load file: lfw_names_gender_kg_min.csv
    +Name, Images, Gender, Description
    +
    +
    Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.
    Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.

    Intro

    +

    Three paragraphs describing the LFW dataset in a format that can be easily replicated for the other datasets. Nothing too custom. An analysis of the initial research papers with context relative to all the other dataset papers.

    +
All 5,749 people in the LFW Dataset, sorted from most to least images collected.
All 5,749 people in the LFW Dataset, sorted from most to least images collected.

    LFW by the Numbers

    +
      +
    • Was first published in 2007
    • +
• Developed out of a prior dataset from Berkeley called "Faces in the Wild" or "Names and Faces" [^lfw_original_paper]
    • +
    • Includes 13,233 images and 5,749 different people [^lfw_website]
    • +
    • There are about 3 men for every 1 woman (4,277 men and 1,472 women)[^lfw_website]
    • +
    • The person with the most images is George W. Bush with 530
    • +
    • Most people (70%) in the dataset have only 1 image
    • +
• There are 1,680 people in the dataset with 2 or more images [^lfw_website]
    • +
• Two of the four original authors received funding from the Office of the Director of National Intelligence and IARPA for their 2016 LFW survey follow-up report
    • +
    • The LFW dataset includes over 500 actors, 30 models, 10 presidents, 24 football players, 124 basketball players, 11 kings, and 2 queens
    • +
    • In all the LFW publications provided by the authors the words "ethics", "consent", and "privacy" appear 0 times [^lfw_original_paper], [^lfw_survey], [^lfw_tech_report] , [^lfw_website]
    • +
    • The word "future" appears 71 times
    • +
    +

    Facts

    +
      +
    • Was created for the purpose of improving "unconstrained face recognition" [^lfw_original_paper]
    • +
    • All images in LFW were obtained "in the wild" meaning without any consent from the subject or from the photographer
    • +
• The faces were detected using the Viola-Jones Haar cascade face detector [^lfw_website] [^lfw_survey] (see the detection sketch after this list)
    • +
    • Is considered the "most popular benchmark for face recognition" [^lfw_baidu]
    • +
    • Is "the most widely used evaluation set in the field of facial recognition" [^lfw_pingan]
    • +
    • Is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." [^lfw_pingan]
    • +
    +
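A minimal sketch of Viola-Jones style Haar cascade face detection with OpenCV is shown below. This is not the authors' original detection code; it only illustrates the technique referenced in the list above, using OpenCV's stock frontal-face cascade and a placeholder input path ('photo.jpg'):

import cv2 as cv

# load OpenCV's bundled frontal-face Haar cascade (a Viola-Jones style detector)
cascade_fp = cv.data.haarcascades + 'haarcascade_frontalface_default.xml'
face_cascade = cv.CascadeClassifier(cascade_fp)

# 'photo.jpg' is a placeholder input image
im = cv.imread('photo.jpg')
gray = cv.cvtColor(im, cv.COLOR_BGR2GRAY)

# detectMultiScale returns one (x, y, w, h) bounding box per detected face
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
print('found {} face(s)'.format(len(faces)))
for (x, y, w, h) in faces:
    cv.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv.imwrite('photo_faces.jpg', im)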

    need citations

    +
      +
    • All images were copied from Yahoo News between 2002 - 2004 [^lfw_original_paper]
    • +
    • SenseTime, who has relied on LFW for benchmarking their facial recognition performance, is the leading provider of surveillance to the Chinese Government (need citation)
    • +
    +
     former President George W. Bush
    former President George W. Bush
    +
Colin Powell (236), Tony Blair (144), and Donald Rumsfeld (121)
Colin Powell (236), Tony Blair (144), and Donald Rumsfeld (121)

    People and Companies using the LFW Dataset

    +

    This section describes who is using the dataset and for what purposes. It should include specific examples of people or companies with citations and screenshots. This section is followed up by the graph, the map, and then the supplementary material.

    The LFW dataset is used by numerous companies for benchmarking algorithms and in some cases training. According to the benchmarking results page [^lfw_results] provided by the authors, over 2 dozen companies have contributed their benchmark results.

    According to BiometricUpdate.com [^lfw_pingan], LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."

    According to researchers at the Baidu Research – Institute of Deep Learning "LFW has been the most popular evaluation benchmark for face recognition, and played a very important role in facilitating the face recognition society to improve algorithm. [^lfw_baidu]."

    +

In addition to commercial use as an evaluation tool, all of the faces in the LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.

    load file: lfw_commercial_use.csv
     name_display,company_url,example_url,country,description
     
    @@ -83,13 +110,18 @@ name_display,company_url,example_url,country,description

    The LFW face recognition training and evaluation dataset is a historically important face dataset as it was the first popular dataset to be created entirely from Internet images, paving the way for a global trend towards downloading anyone’s face from the Internet and adding it to a dataset. As will be evident with other datasets, LFW’s approach has now become the norm.

For all of the 5,000 people in this dataset, their faces are forever a part of facial recognition history. It would be impossible to remove anyone from the dataset because it is so ubiquitous. For the rest of their lives and forever after, these 5,000 people will continue to be used for training facial recognition surveillance.

    Right to Removal

    -

    If you are affected by disclosure of your identity in this dataset please do contact the authors, many state that they are willing to remove images upon request. The authors of the LFW can be reached from the emails posted in their paper:

    +

If you are affected by disclosure of your identity in this dataset, please do contact the authors. Many have stated that they are willing to remove images upon request. The authors of the LFW dataset provide the following email for inquiries:

    You can use the following message to request removal from the dataset:

    +

    To: Gary Huang mailto:gbhuang@cs.umass.edu

    +

    Subject: Request for Removal from LFW Face Dataset

    Dear [researcher name],

    -

    I am writing to you about the "LFW Dataset". Recently I have discovered that your dataset includes my identity and no longer wish to be included in your dataset

    -

    MegaPixels is an educational art project developed for academic purposes. In no way does this project aim to villify the researchers who produced the datasets. The aim of this project is to encourage discourse around ethics and consent in artificial intelligence by providing information about these datasets that is otherwise difficult to obtain or inaccessible to other researchers.

    +

    I am writing to you about the "Labeled Faces in The Wild Dataset". Recently I discovered that your dataset includes my identity and I no longer wish to be included in your dataset.

    +

The dataset is being used by thousands of companies around the world to improve facial recognition software, including usage by governments for the purposes of law enforcement, national security, tracking consumers in retail environments, and tracking individuals through public spaces.

    +

My name as it appears in your dataset is [your name]. Please remove all of my images from your dataset and inform your newsletter subscribers to likewise update their copies.

    +

    - [your name]

    +

    Supplementary Data

    -

    Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos.

    +

    Researchers, journ

    @@ -221,6 +253,9 @@ imageio.imwrite('lfw_montage_full.png', montage) montage_960 = imutils.resize(montage, width=960) imageio.imwrite('lfw_montage_960.jpg', montage_960) +

    Disclaimer

    +

MegaPixels is an educational art project designed to encourage discourse about facial recognition datasets. Any ethical or legal issues should be directed to the researchers' parent organizations. Except where necessary for contact or clarity, the names of researchers have been substituted with their parent organization. In no way does this project aim to vilify researchers who produced the datasets.

    +

    Read more about MegaPixels Code of Conduct


      diff --git a/site/public/datasets/vgg_face2/index.html b/site/public/datasets/vgg_face2/index.html new file mode 100644 index 00000000..24a1059b --- /dev/null +++ b/site/public/datasets/vgg_face2/index.html @@ -0,0 +1,84 @@ + + + + MegaPixels + + + + + + + + + +
      + + +
      MegaPixels
      + The Darkside of Datasets +
      + +
      +
      + +

VGG Face2

      +
      Created
      2018
      Images
      3.3M
      People
      9,000
      Created From
      Scraping search engines
      Search available
      [Searchable](#)

VGG Face2 is the updated version of the VGG Face dataset and now includes over 3.3M face images from over 9K people. The identities were selected by taking the top 500K identities in Google's Knowledge Graph of celebrities and then selecting only the names that yielded enough training images. The dataset was created in the UK but funded by the Office of the Director of National Intelligence in the United States.

      +

      {INSERT IMAGE SEARCH MODULE}

      +

      {INSERT TEXT SEARCH MODULE}

      +
      load file: lfw_names_gender_kg_min.csv
      +Name, Images, Gender, Description
      +
      +

      VGG Face2 by the Numbers

      +
        +
      • 1,331 actresses, 139 presidents
      • +
      • 3 husbands and 16 wives
      • +
• 2 snooker players
      • +
      • 1 guru
      • +
      • 1 pornographic actress
      • +
• 3 computer programmers
      • +
      +

      Names and descriptions

      +
        +
      • The original VGGF2 name list has been updated with the results returned from Google Knowledge
      • +
• Names with a similarity score greater than 0.75 were automatically updated. Scores were computed using import difflib; seq = difflib.SequenceMatcher(a=a.lower(), b=b.lower()); score = seq.ratio() (see the sketch after this list)
      • +
• The 97 names with a score of 0.75 or lower were manually reviewed; this includes name changes validated using Wikipedia.org results for names such as "Bruce Jenner" to "Caitlyn Jenner", spousal last-name changes, and discretionary changes to improve search results such as combining nicknames with the full name when appropriate, for example changing "Aleksandar Petrović" to "Aleksandar 'Aco' Petrović", and minor changes such as "Mohammad Ali" to "Muhammad Ali"
      • +
• The `Description` text was automatically added when the Knowledge Graph score was greater than 250
      • +
      +
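The name-matching step described above can be sketched as follows. The helper name and the example pairs are illustrative only; the 0.75 threshold and the SequenceMatcher call are taken from the list above:

import difflib

def name_similarity(a, b):
    # case-insensitive similarity ratio between two name strings (0.0 to 1.0)
    seq = difflib.SequenceMatcher(a=a.lower(), b=b.lower())
    return seq.ratio()

# illustrative pairs; scores above 0.75 were updated automatically,
# lower scores were flagged for manual review
pairs = [('Bruce Jenner', 'Caitlyn Jenner'), ('Mohammad Ali', 'Muhammad Ali')]
for a, b in pairs:
    score = name_similarity(a, b)
    action = 'auto-update' if score > 0.75 else 'manual review'
    print('{} -> {}: {:.2f} ({})'.format(a, b, score, action))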

      TODO

      +
        +
      • create name list, and populate with Knowledge graph information like LFW
      • +
      • make list of interesting number stats, by the numbers
      • +
      • make list of interesting important facts
      • +
      • write intro abstract
      • +
      • write analysis of usage
      • +
• find examples, citations, and screenshots of usage
      • +
      • find list of companies using it for table
      • +
      • create montages of the dataset, like LFW
      • +
      • create right to removal information
      • +
      +
      + +
      + + + + + \ No newline at end of file diff --git a/site/public/index.html b/site/public/index.html index 51006b59..91ff467a 100644 --- a/site/public/index.html +++ b/site/public/index.html @@ -71,6 +71,7 @@ + diff --git a/site/public/research/00_introduction/index.html b/site/public/research/00_introduction/index.html new file mode 100644 index 00000000..8f598f5b --- /dev/null +++ b/site/public/research/00_introduction/index.html @@ -0,0 +1,86 @@ + + + + MegaPixels + + + + + + + + + +
      + + +
      MegaPixels
      + The Darkside of Datasets +
      + +
      +
      + +
      +

      Untitled Page

      +
      +
      +
      Posted
      +
      2018-12-31
      +
      +
      +
      By
      +
      Adam Harvey
      +
      + +
      +
      + +

It was the early 2000s. Face recognition was new and no one seemed sure exactly how well it was going to perform in practice. In theory, face recognition was poised to be a game changer, a force multiplier, a strategic military advantage, a way to make cities safer and to secure borders. This was the future John Ashcroft demanded with the Total Information Awareness Act of 2003 and that spooks had dreamed of for decades. It was a future that academics at Carnegie Mellon University and Colorado State University would help build. It was also a future that celebrities would play a significant role in building. And to the surprise of ordinary Internet users like myself and perhaps you, it was a future that millions of Internet users would unwittingly play a role in creating.

      +

Now the future has arrived and it doesn't make sense. Facial recognition works yet it doesn't actually work. Facial recognition is cheap and accessible but also expensive and out of control. Facial recognition research has achieved headline-grabbing superhuman accuracies over 99.9%, yet facial recognition is also dangerously inaccurate. During a trial installation at Südkreuz station in Berlin in 2018, 20% of the matches were wrong, an error rate so high that it should have no connection to law enforcement or justice. And in London, the Metropolitan Police had been using facial recognition software that mistakenly identified people as criminals in an alarming 98% of its matches 1, which perhaps is a crime itself.

      +

      MegaPixels is an online art project that explores the history of facial recognition from the perspective of datasets. To paraphrase the artist Trevor Paglen, whoever controls the dataset controls the meaning. MegaPixels aims to unravel the meanings behind the data and expose the darker corners of the biometric industry that have contributed to its growth. MegaPixels does not start with a conclusion, a moralistic slant, or a

      +

Whether or not to build facial recognition is a question that can no longer be asked. As an outspoken critic of face recognition, I've developed, and hopefully furthered, my understanding during the last 10 years I've spent working with computer vision. Though I initially disagreed, I've come to see the technocratic perspective as a non-negotiable reality. As Oren (nytimes article) wrote in a NYT Op-Ed, "the horse is out of the barn" and the only thing we can do collectively or individually is steer towards the least worst outcome. Computational communication has entered a new era and it's both exciting and frightening to explore the potentials and opportunities. In 1997 getting access to 1 teraFLOPS of computational power would have cost you $55 million and required a strategic partnership with the Department of Defense. At the time of writing, anyone can rent 1 teraFLOPS on a cloud GPU marketplace for less than $1/day. 2.

      +

I hope that this project will illuminate the darker areas of the strange world of facial recognition that have not yet received attention and encourage discourse in academia, industry, and . By no means do I believe discourse can save the day. Nor do I think creating artwork can. In fact, I'm not exactly sure what the outcome of this project will be. The project is not so much what I publish here but what happens after. This entire project is only a prologue.

      +

As McLuhan wrote, "You can't have a static, fixed position in the electric age". And in our hyper-connected age of mass surveillance, artificial intelligence, and unevenly distributed virtual futures, the most irrational thing to be is rational. Increasingly the world is becoming a contradiction where people use surveillance to protest surveillance, use

      +

Like many projects, MegaPixels spent years meandering between formats and unfeasible budgets, and was generally too niche a subject. The basic idea for this project, as proposed for the original Glass Room installation in 2016 in NYC, was to build an interactive mirror that showed people if they had been included in the LFW facial recognition dataset. The idea was based on my reaction to all the datasets I'd come across during research for the CV Dazzle project. I'd noticed strange datasets created for training and testing face detection algorithms. Most were created in laboratory settings and their interpretation of face data was very strict.

      +

      About the name

      +

      About the funding

      +

      About me

      +

      About the team

      +

      Conclusion

      +

      for other post

      +

It was the early 2000s. Face recognition was new and no one seemed sure how well it was going to perform in practice. In theory, face recognition was poised to be a game changer, a force multiplier, a strategic military advantage, a way to make cities safer and to secure the borders. It was the future that John Ashcroft demanded with the Total Information Awareness Act of 2003. It was a future that academics helped build. It was a future that celebrities helped build. And it was a future that

      +

A decade earlier the Department of Defense Counterdrug Technology Development Program Office initiated a feasibility study called FERET (FacE REcognition Technology) to "develop automatic face recognition capabilities that could be employed to assist security, intelligence, and law enforcement personnel in the performance of their duties [^feret_website]."

      +

One problem with the FERET dataset was that the photos were taken in controlled settings. For face recognition to work, it would have to be used in uncontrolled settings. Even newer datasets such as Multi-PIE (Pose, Illumination, and Expression) from Carnegie Mellon University included only indoor photos of cooperative subjects. Not only were the photos completely unrealistic, CMU's Multi-PIE included only 18 individuals, cost $500 for academic use [^cmu_multipie_cost], took years to create, and required consent from every participant.

      +
      +
      +
      1. Sharman, Jon. "Metropolitan Police's facial recognition technology 98% inaccurate, figures show". 2018. https://www.independent.co.uk/news/uk/home-news/met-police-facial-recognition-success-south-wales-trial-home-office-false-positive-a8345036.html

      2. +
3. Calle, Dan. "Supercomputers". 1997. http://ei.cs.vt.edu/~history/SUPERCOM.Calle.HTML

      4. +
      +
      +
      + +
      + + + + + \ No newline at end of file diff --git a/site/public/research/index.html b/site/public/research/index.html index cf9546e1..59a5fee9 100644 --- a/site/public/research/index.html +++ b/site/public/research/index.html @@ -28,7 +28,7 @@

      Research Blog

      The darkside of datasets and the future of computer vision

      -
      +
      -- cgit v1.2.3-70-g09d2
      Title