| field | value | date |
|---|---|---|
| author | adamhrv <adam@ahprojects.com> | 2019-02-28 18:50:22 +0100 |
| committer | adamhrv <adam@ahprojects.com> | 2019-02-28 18:50:22 +0100 |
| commit | 6c631c88c9ecc2683b95534cfd15e82650c1b501 | |
| tree | 786d993a57c8c4d6fba26cad5fbda056c346c418 | |
| parent | 9e3bb35630349847bc005389c408f3072e0e22db | |
| parent | e845766d970f4afefc2fc47367c3478413f98ff2 | |
Merge branch 'master' of github.com:adamhrv/megapixels_dev
90 files changed, 1649 insertions, 667 deletions
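For anyone reviewing this merge locally, a minimal sketch of how to inspect it — assuming a clone of the github.com:adamhrv/megapixels_dev repository named in the merge message:

```
# show the merge commit; -m splits a merge into a diff against each parent
git show -m --stat 6c631c88c9ecc2683b95534cfd15e82650c1b501

# or compare the two parent commits listed above directly
git diff --stat 9e3bb35630349847bc005389c408f3072e0e22db e845766d970f4afefc2fc47367c3478413f98ff2
```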
@@ -19,7 +19,7 @@ pip install numpy Pillow
 pip install dlib
 pip install requests simplejson click pdfminer.six
 pip install urllib3 flask flask_sqlalchemy mysql-connector
-pip install pymediainfo tqdm opencv-python imutils
+pip install pymediainfo tqdm opencv-python imutils watchdog
 pip install scikit-image python-dotenv imagehash scikit-learn colorlog
 pip install celery keras tensorflow
 pip install python.app # OSX only! needed for matplotlib
@@ -38,28 +38,40 @@ You may need to set the database charset to `utf8mb4` in order to import the CSV
 
 ALTER DATABASE megapixels CHARACTER SET = utf8mb4 COLLATE = utf8mb4_unicode_ci;
 ```
 
-## Building the site
+## Development: automatic rebuilds
 
-The most recently built copy of the site is kept in the repo. This is generated directly from NextCloud. Be mindful that NextCloud will create extra copies of things if there are merge conflicts.
+In development, we can watch a bunch of things and rebuild stuff automatically. These rebuild the HTML and the Javascript:
 
 ```
-npm install
-npm run build
-cd megapixels
-python cli_faiss.py sync_metadata
-python cli_faiss.py build_faiss
-python cli_faiss.py build_db
-python cli_site.py build
+python cli_site.py watch
+npm run watch
 ```
 
-## Running the site
+## Running the various servers
 
-On OSX, you must run the server with `pythonw` because of matplotlib.
+Run the web server, which will serve some HTML (you will need to add index.html to URLs... alas!):
 
 ```
 python cli_flask.py run
+```
+
+These servers must be running to use all features of the site (face search, etc.)
+
+```
 python `which celery` worker -A app.server.tasks --loglevel=info -E
 redis-server /usr/local/etc/redis.conf
-npm run watch
 ```
+Note: On OSX, you must run the server with `pythonw` because of matplotlib.
+
+## Building the site for production
+
+```
+npm install
+npm run build
+cd megapixels
+python cli_faiss.py sync_metadata
+python cli_faiss.py build_faiss
+python cli_faiss.py build_db
+python cli_site.py build
+```
diff --git a/client/index.js b/client/index.js
index c9335f14..37906f30 100644
--- a/client/index.js
+++ b/client/index.js
@@ -110,9 +110,9 @@ function runApplets() {
 
 function main() {
   const paras = document.querySelectorAll('section p')
-  if (paras.length) {
-    paras[0].classList.add('first_paragraph')
-  }
+  // if (paras.length) {
+  //   paras[0].classList.add('first_paragraph')
+  // }
   toArray(document.querySelectorAll('header .links a')).forEach(tag => {
     if (window.location.href.match(tag.href)) {
       tag.classList.add('active')
diff --git a/client/map/index.js b/client/map/index.js
index 2a6686be..56a5abed 100644
--- a/client/map/index.js
+++ b/client/map/index.js
@@ -2,25 +2,25 @@ import L from 'leaflet'
 import './leaflet.bezier'
 
 const arcStyles = {
-  'edu': {
+  edu: {
     color: 'rgb(245, 246, 150)',
     fillColor: 'rgb(245, 246, 150)',
     opacity: 0.5,
     weight: '1',
   },
-  'company': {
+  company: {
     color: 'rgb(50, 100, 246)',
     fillColor: 'rgb(50, 100, 246)',
     opacity: 1.0,
     weight: '2',
   },
-  'gov': {
+  gov: {
     color: 'rgb(245, 150, 100)',
     fillColor: 'rgb(245, 150, 150)',
     opacity: 1.0,
     weight: '2',
   },
-  'mil': {
+  mil: {
     color: 'rgb(245, 0, 0)',
     fillColor: 'rgb(245, 0, 0)',
     opacity: 1.0,
@@ -78,17 +78,44 @@ export default function append(el, payload) {
     source = [address.lat, address.lng].map(n => parseFloat(n))
   }
 
-  citations.sort((a,b) => sortOrder.indexOf(a) - sortOrder.indexOf(b))
-    .forEach(citation => {
-      const address = citation.addresses[0]
-      const latlng = [address.lat, address.lng].map(n => parseFloat(n))
-      if (Number.isNaN(latlng[0]) || Number.isNaN(latlng[1])) return
-      addMarker(map, latlng, citation.title, address.name)
-      addArc(map, source, latlng, arcStyles[address.type])
-    })
+  // ....i dont think the sort order does anything??
+  citations.sort((a, b) => sortOrder.indexOf(a) - sortOrder.indexOf(b))
+    .forEach(citation => {
+      const citationAddress = citation.addresses[0]
+      const latlng = [citationAddress.lat, citationAddress.lng].map(n => parseFloat(n))
+      if (Number.isNaN(latlng[0]) || Number.isNaN(latlng[1])) return
+      addMarker(map, latlng, citation.title, citationAddress.name)
+      addArc(map, source, latlng, arcStyles[citationAddress.type])
+    })
 
   console.log(paper)
   const rootMarker = addMarker(map, source, paper.title, paper.address)
   rootMarker.openPopup()
+
+  // a transparent div to cover the map, so normal scroll events will not be eaten by leaflet
+  const mapCover = document.createElement("div")
+  mapCover.classList.add("map_cover")
+  mapCover.innerHTML = "<div class='cover_message'>Click here to explore the map</div>"
+  mapCover.querySelector('div').addEventListener('click', () => {
+    map.scrollWheelZoom.enable()
+    el.removeChild(mapCover)
+  })
+  function stopPropagation(e) {
+    e.stopPropagation()
+  }
+  mapCover.addEventListener('mousewheel', stopPropagation, true)
+  mapCover.addEventListener('DOMMouseScroll', stopPropagation, true)
+
+  map.scrollWheelZoom.disable()
+  map.on('focus', () => {
+    map.scrollWheelZoom.enable()
+    el.removeChild(mapCover)
+  })
+  map.on('blur', () => {
+    map.scrollWheelZoom.disable()
+    // el.appendChild(mapCover)
+  })
+
+  el.appendChild(mapCover)
 }
diff --git a/client/splash/index.js b/client/splash/index.js
index e247b7f5..a21110f0 100644
--- a/client/splash/index.js
+++ b/client/splash/index.js
@@ -31,12 +31,14 @@ function build() {
 
 function bind() {
   document.querySelector('.slogan').addEventListener('click', modal.close)
-  toArray(document.querySelectorAll('.aboutLink')).forEach(el => {
-    el.addEventListener('click', modal.toggle)
-  })
-  document.querySelector('.about .inner').addEventListener('click', e => e.stopPropagation())
-  document.querySelector('.about').addEventListener('click', modal.close)
-  document.querySelector('.close').addEventListener('click', modal.close)
+  if (document.querySelector('.about')) {
+    toArray(document.querySelectorAll('.aboutLink')).forEach(el => {
+      el.addEventListener('click', modal.toggle)
+    })
+    document.querySelector('.about .inner').addEventListener('click', e => e.stopPropagation())
+    document.querySelector('.about').addEventListener('click', modal.close)
+    document.querySelector('.close').addEventListener('click', modal.close)
+  }
 }
 
 function animate() {
diff --git a/client/tables.js b/client/tables.js
index 70ab5971..3fadb797 100644
--- a/client/tables.js
+++ b/client/tables.js
@@ -65,11 +65,13 @@ export default function append(el, payload) {
     .then(r => r.text())
     .then(text => {
       try {
+        console.log(text)
         const data = csv.toJSON(text, { headers: { included: true } })
         // console.log(data)
         table.setData(data)
         el.classList.add('loaded')
       } catch (e) {
+        console.error("error making json:", payload.url)
         console.error(e)
         // console.log(text)
diff --git a/client/util/index.js b/client/util/index.js
index d0db0d98..0792e24e 100644
--- a/client/util/index.js
+++ b/client/util/index.js
@@ -5,12 +5,16 @@ export const isiPad = !!(navigator.userAgent.match(/iPad/i))
 export const isAndroid = !!(navigator.userAgent.match(/Android/i))
 export const isMobile = isiPhone || isiPad || isAndroid
 export const isDesktop = !isMobile
+export const isFirefox = typeof InstallTrigger !== 'undefined'
 
 export const toArray = a => Array.prototype.slice.apply(a)
 export const choice = a => a[Math.floor(Math.random() * a.length)]
 
 const htmlClassList = document.body.parentNode.classList
 htmlClassList.add(isDesktop ? 'desktop' : 'mobile')
+if (isFirefox) {
+  htmlClassList.add('firefox')
+}
 
 /* Default image dimensions */
diff --git a/megapixels/app/server/create.py b/megapixels/app/server/create.py
index a1ce56df..b6ca7ba3 100644
--- a/megapixels/app/server/create.py
+++ b/megapixels/app/server/create.py
@@ -1,3 +1,4 @@
+import os
 import logging
 import logging.handlers
 
@@ -13,7 +14,7 @@ logging.getLogger().addHandler(logging.StreamHandler())
 
 logging.debug("starting app")
 
-from flask import Flask, Blueprint, jsonify, send_from_directory
+from flask import Flask, Blueprint, jsonify, send_from_directory, request
 from flask_sqlalchemy import SQLAlchemy
 
 from app.models.sql_factory import connection_url, load_sql_datasets
@@ -38,7 +39,14 @@ def create_app(script_info=None):
   app.register_blueprint(api, url_prefix='/api')
   app.register_blueprint(api_task, url_prefix='/task')
 
-  app.add_url_rule('/<path:file_relative_path_to_root>', 'serve_page', serve_page, methods=['GET'])
+
+  @app.errorhandler(404)
+  def page_not_found(e):
+    path = os.path.join(os.path.dirname(__file__), './static', request.path[1:], 'index.html')
+    if os.path.exists(path):
+      with open(path, "r") as f:
+        return f.read(), 200
+    return "404!!!!!!!!!!!!1", 404
 
   @app.route('/', methods=['GET'])
   def index():
@@ -48,22 +56,4 @@ def create_app(script_info=None):
   def shell_context():
     return { 'app': app, 'db': db }
 
-  @app.route("/site-map")
-  def site_map():
-    links = []
-    for rule in app.url_map.iter_rules():
-      # url = url_for(rule.endpoint, **(rule.defaults or {}))
-      # print(url)
-      links.append((rule.endpoint))
-    return(jsonify(links))
-
   return app
-
-def serve_page(file_relative_path_to_root):
-  """
-  trying to get this to serve /path/ with /path/index.html,
-  ...but it doesnt actually matter for production...
-  """
-  if file_relative_path_to_root[-1] == '/':
-    file_relative_path_to_root += 'index.html'
-  return send_from_directory("static", file_relative_path_to_root)
diff --git a/megapixels/app/settings/app_cfg.py b/megapixels/app/settings/app_cfg.py
index 0b1fb69d..40625958 100644
--- a/megapixels/app/settings/app_cfg.py
+++ b/megapixels/app/settings/app_cfg.py
@@ -163,7 +163,7 @@ S3_HTTP_METADATA_URL = join(S3_HTTP_URL, 'metadata')
 S3_SITE_PATH = "v1/site"
 S3_DATASETS_PATH = "v1" # datasets is already in the filename
 DIR_SITE_PUBLIC = "../site/public"
-DIR_SITE_CONTENT = "../site/content"
+DIR_SITE_CONTENT = "../site/content/pages"
 DIR_SITE_TEMPLATES = "../site/templates"
 DIR_SITE_USER_CONTENT = "../site/public/user_content"
diff --git a/megapixels/app/site/builder.py b/megapixels/app/site/builder.py
index fac49c24..603d4788 100644
--- a/megapixels/app/site/builder.py
+++ b/megapixels/app/site/builder.py
@@ -7,6 +7,7 @@ from jinja2 import Environment, FileSystemLoader, select_autoescape
 
 import app.settings.app_cfg as cfg
 import app.site.s3 as s3
+import app.site.loader as loader
 import app.site.parser as parser
 
 env = Environment(
@@ -21,7 +22,7 @@ def build_page(fn, research_posts, datasets):
   - syncs any assets with s3
   - handles certain index pages...
""" - metadata, sections = parser.read_metadata(fn) + metadata, sections = loader.read_metadata(fn) if metadata is None: print("{} has no metadata".format(fn)) @@ -55,7 +56,7 @@ def build_page(fn, research_posts, datasets): if 'index.md' in fn: s3.sync_directory(dirname, s3_dir, metadata) - content = parser.parse_markdown(sections, s3_path, skip_h1=skip_h1) + content = parser.parse_markdown(metadata, sections, s3_path, skip_h1=skip_h1) html = template.render( metadata=metadata, @@ -73,10 +74,10 @@ def build_index(key, research_posts, datasets): """ build the index of research (blog) posts """ - metadata, sections = parser.read_metadata('../site/content/{}/index.md'.format(key)) + metadata, sections = loader.read_metadata(os.path.join(cfg.DIR_SITE_CONTENT, key, 'index.md')) template = env.get_template("page.html") s3_path = s3.make_s3_path(cfg.S3_SITE_PATH, metadata['path']) - content = parser.parse_markdown(sections, s3_path, skip_h1=False) + content = parser.parse_markdown(metadata, sections, s3_path, skip_h1=False) content += parser.parse_research_index(research_posts) html = template.render( metadata=metadata, @@ -93,8 +94,8 @@ def build_site(): """ build the site! =^) """ - research_posts = parser.read_research_post_index() - datasets = parser.read_datasets_index() + research_posts = loader.read_research_post_index() + datasets = loader.read_datasets_index() for fn in glob.iglob(os.path.join(cfg.DIR_SITE_CONTENT, "**/*.md"), recursive=True): build_page(fn, research_posts, datasets) build_index('research', research_posts, datasets) @@ -103,7 +104,8 @@ def build_file(fn): """ build just one page from a filename! =^) """ - research_posts = parser.read_research_post_index() - datasets = parser.read_datasets_index() - fn = os.path.join(cfg.DIR_SITE_CONTENT, fn) + research_posts = loader.read_research_post_index() + datasets = loader.read_datasets_index() + if cfg.DIR_SITE_CONTENT not in fn: + fn = os.path.join(cfg.DIR_SITE_CONTENT, fn) build_page(fn, research_posts, datasets) diff --git a/megapixels/app/site/loader.py b/megapixels/app/site/loader.py new file mode 100644 index 00000000..691efb25 --- /dev/null +++ b/megapixels/app/site/loader.py @@ -0,0 +1,123 @@ +import os +import re +import glob +import simplejson as json + +import app.settings.app_cfg as cfg + +def read_metadata(fn): + """ + Read in read a markdown file and extract the metadata + """ + with open(fn, "r") as file: + data = file.read() + data = data.replace("\n ", "\n") + if "\n" in data: + data = data.replace("\r", "") + else: + data = data.replace("\r", "\n") + sections = data.split("\n\n") + return parse_metadata(fn, sections) + + +default_metadata = { + 'status': 'published', + 'title': 'Untitled Page', + 'desc': '', + 'slug': '', + 'published': '2018-12-31', + 'updated': '2018-12-31', + 'authors': 'Adam Harvey', + 'sync': 'true', + 'tagline': '', +} + +def parse_metadata(fn, sections): + """ + parse the metadata headers in a markdown file + (everything before the second ---------) + also generates appropriate urls for this page :) + """ + found_meta = False + metadata = {} + valid_sections = [] + for section in sections: + if not found_meta and ': ' in section: + found_meta = True + parse_metadata_section(metadata, section) + continue + if '-----' in section: + continue + if found_meta: + valid_sections.append(section) + + if 'title' not in metadata: + print('warning: {} has no title'.format(fn)) + for key in default_metadata: + if key not in metadata: + metadata[key] = default_metadata[key] + + basedir = 
os.path.dirname(fn.replace(cfg.DIR_SITE_CONTENT, '')) + basename = os.path.basename(fn) + if basedir == '/': + metadata['path'] = '/' + metadata['url'] = '/' + elif basename == 'index.md': + metadata['path'] = basedir + '/' + metadata['url'] = metadata['path'] + else: + metadata['path'] = basedir + '/' + metadata['url'] = metadata['path'] + basename.replace('.md', '') + '/' + + if metadata['status'] == 'published|draft|private': + metadata['status'] = 'published' + + metadata['sync'] = metadata['sync'] != 'false' + + metadata['author_html'] = '<br>'.join(metadata['authors'].split(',')) + + return metadata, valid_sections + +def parse_metadata_section(metadata, section): + """ + parse a metadata key: value pair + """ + for line in section.split("\n"): + if ': ' not in line: + continue + key, value = line.split(': ', 1) + metadata[key.lower()] = value + + +def read_research_post_index(): + """ + Generate an index of the research (blog) posts + """ + return read_post_index('research') + + +def read_datasets_index(): + """ + Generate an index of the datasets + """ + return read_post_index('datasets') + + +def read_post_index(basedir): + """ + Generate an index of posts + """ + posts = [] + for fn in sorted(glob.glob(os.path.join(cfg.DIR_SITE_CONTENT, basedir, '*/index.md'))): + metadata, valid_sections = read_metadata(fn) + if metadata is None or metadata['status'] == 'private' or metadata['status'] == 'draft': + continue + posts.append(metadata) + if not len(posts): + posts.append({ + 'title': 'Placeholder', + 'slug': 'placeholder', + 'date': 'Placeholder', + 'url': '/', + }) + return posts diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py index f739315a..ad4256ad 100644 --- a/megapixels/app/site/parser.py +++ b/megapixels/app/site/parser.py @@ -10,9 +10,141 @@ import app.site.s3 as s3 renderer = mistune.Renderer(escape=False) markdown = mistune.Markdown(renderer=renderer) +footnote_count = 0 + +def parse_markdown(metadata, sections, s3_path, skip_h1=False): + """ + parse page into sections, preprocess the markdown to handle our modifications + """ + groups = [] + current_group = [] + footnotes = [] + in_stats = False + in_footnotes = False + ignoring = False + + if 'desc' in metadata and 'subdesc' in metadata: + groups.append(intro_section(metadata, s3_path)) + + for section in sections: + if skip_h1 and section.startswith('# '): + continue + elif section.strip().startswith('---'): + continue + elif section.lower().strip().startswith('ignore text'): + ignoring = True + continue + elif section.strip().startswith('### Footnotes'): + groups.append(format_section(current_group, s3_path)) + current_group = [] + footnotes = [] + in_footnotes = True + elif in_footnotes: + footnotes.append(section) + elif ignoring: + continue + elif '### statistics' in section.lower() or '### sidebar' in section.lower(): + if len(current_group): + groups.append(format_section(current_group, s3_path)) + current_group = [] + if 'sidebar' not in section.lower(): + current_group.append(section) + in_stats = True + elif in_stats and not section.strip().startswith('## ') and 'end sidebar' not in section.lower(): + current_group.append(section) + elif in_stats and section.strip().startswith('## ') or 'end sidebar' in section.lower(): + current_group = [format_section(current_group, s3_path, 'right-sidebar', tag='div')] + if 'end sidebar' not in section.lower(): + current_group.append(section) + in_stats = False + elif section.strip().startswith('```'): + groups.append(format_section(current_group, 
+      current_group = []
+      current_group.append(section)
+      if section.strip().endswith('```'):
+        groups.append(format_applet("\n\n".join(current_group), s3_path))
+        current_group = []
+    elif section.strip().endswith('```'):
+      current_group.append(section)
+      groups.append(format_applet("\n\n".join(current_group), s3_path))
+      current_group = []
+    elif section.startswith('+ '):
+      groups.append(format_section(current_group, s3_path))
+      groups.append('<section>' + format_metadata(section) + '<section>')
+      current_group = []
+    elif '![fullwidth:' in section:
+      groups.append(format_section(current_group, s3_path))
+      groups.append(format_section([section], s3_path, type='fullwidth'))
+      current_group = []
+    elif '![wide:' in section:
+      groups.append(format_section(current_group, s3_path))
+      groups.append(format_section([section], s3_path, type='wide'))
+      current_group = []
+    elif '![' in section:
+      groups.append(format_section(current_group, s3_path))
+      groups.append(format_section([section], s3_path, type='images'))
+      current_group = []
+    else:
+      current_group.append(section)
+  groups.append(format_section(current_group, s3_path))
+
+  footnote_txt = ''
+  footnote_lookup = {}
+
+  if len(footnotes):
+    footnote_txt, footnote_lookup = format_footnotes(footnotes, s3_path)
+
+  content = "".join(groups)
+
+  if footnote_lookup:
+    for key, index in footnote_lookup.items():
+      global footnote_count
+      footnote_count = 0
+      letters = "abcdefghijklmnopqrstuvwxyz"
+      footnote_backlinks = []
+      def footnote_tag(match):
+        global footnote_count
+        footnote_count += 1
+        footnote_backlinks.append('<a href="#{}_{}">{}</a>'.format(key, footnote_count, letters[footnote_count-1]))
+        return '<a class="footnote_shim" name="{}_{}"> </a><a href="#{}" class="footnote" title="Footnote {}">{}</a>'.format(key, footnote_count, key, index, index)
+      key_regex = re.compile(key.replace('[', '\\[').replace('^', '\\^').replace(']', '\\]'))
+      content = key_regex.sub(footnote_tag, content)
+      footnote_txt = footnote_txt.replace("{}_BACKLINKS".format(index), "".join(footnote_backlinks))
+  content += footnote_txt
+  return content
+
+
+def intro_section(metadata, s3_path):
+  """
+  Build the intro section for datasets
+  """
+
+  section = "<section class='intro_section' style='background-image: url({})'>".format(s3_path + metadata['image'])
+  section += "<div class='inner'>"
+
+  parts = []
+  if 'desc' in metadata:
+    desc = metadata['desc']
+    if 'color' in metadata and metadata['title'] in desc:
+      desc = desc.replace(metadata['title'], "<span style='color: {}'>{}</span>".format(metadata['color'], metadata['title']))
+    section += "<div class='hero_desc'><span>{}</span></div>".format(desc, desc)
+
+  if 'subdesc' in metadata:
+    subdesc = markdown(metadata['subdesc']).replace('<p>', '').replace('</p>', '')
+    section += "<div class='hero_subdesc'><span>{}</span></div>".format(subdesc, subdesc)
+
+  section += "</div>"
+  section += "</section>"
+
+  if 'caption' in metadata:
+    section += "<section><div class='image'><div class='caption'>{}</div></div></section>".format(metadata['caption'])
+
+  return section
+
+
 def fix_images(lines, s3_path):
   """
-  do our own tranformation of the markdown around images to handle wide images etc
+  do our own transformation of the markdown around images to handle wide images etc
   lines: markdown lines
   """
   real_lines = []
@@ -22,48 +154,89 @@ def fix_images(lines, s3_path):
     line = line.replace('
     url, tail = tail.split(')', 1)
+    tag = ''
     if ':' in alt_text:
-      tail, alt_text = alt_text.split(':', 1)
+      tag, alt_text = alt_text.split(':', 1)
     img_tag = "<img src='{}' alt='{}'>".format(s3_path + url, alt_text.replace("'", ""))
-    if len(alt_text):
+    if 'sideimage' in tag:
+      line = "<div class='sideimage'>{}<div>{}</div></div>".format(img_tag, markdown(tail))
+    elif len(alt_text):
       line = "<div class='image'>{}<div class='caption'>{}</div></div>".format(img_tag, alt_text)
     else:
       line = "<div class='image'>{}</div>".format(img_tag, alt_text)
     real_lines.append(line)
   return "\n".join(real_lines)
 
-def format_section(lines, s3_path, type=''):
+
+def format_section(lines, s3_path, type='', tag='section'):
   """
   format a normal markdown section
   """
   if len(lines):
+    lines = fix_meta(lines)
     lines = fix_images(lines, s3_path)
     if type:
-      return "<section class='{}'>{}</section>".format(type, markdown(lines))
+      return "<{} class='{}'>{}</{}>".format(tag, type, markdown(lines), tag)
     else:
-      return "<section>" + markdown(lines) + "</section>"
+      return "<{}>{}</{}>".format(tag, markdown(lines), tag)
   return ""
 
+def fix_meta(lines):
+  """
+  Format metadata sections before passing to markdown
+  """
+  new_lines = []
+  for line in lines:
+    if line.startswith('+ '):
+      line = format_metadata(line)
+    new_lines.append(line)
+  return new_lines
+
 def format_metadata(section):
   """
   format a metadata section (+ key: value pairs)
   """
   meta = []
   for line in section.split('\n'):
+    if ': ' not in line:
+      continue
     key, value = line[2:].split(': ', 1)
     meta.append("<div><div class='gray'>{}</div><div>{}</div></div>".format(key, value))
-  return "<section><div class='meta'>{}</div></section>".format(''.join(meta))
+  return "<div class='meta'>{}</div>".format(''.join(meta))
+
+def format_footnotes(footnotes, s3_path):
+  """
+  Format the footnotes section separately and produce a lookup we can use to update the main site
+  """
+  footnotes = '\n'.join(footnotes).split('\n')
+  index = 1
+  footnote_index_lookup = {}
+  footnote_list = []
+  for footnote in footnotes:
+    if not len(footnote) or '[^' not in footnote:
+      continue
+    key, note = footnote.split(': ', 1)
+    footnote_index_lookup[key] = index
+    footnote_list.append('<a name="{}" class="footnote_shim"></a><span class="backlinks">{}_BACKLINKS</span>'.format(key, index) + markdown(note))
+    index += 1
+
+  footnote_txt = '<section><ul class="footnotes"><li>' + '</li><li>'.join(footnote_list) + '</li></ul></section>'
+  return footnote_txt, footnote_index_lookup
 
 def format_applet(section, s3_path):
+  """
+  Format the applets, which load javascript modules like the map and CSVs
+  """
   # print(section)
   payload = section.strip('```').strip().strip('```').strip().split('\n')
   applet = {}
-  print(payload)
+  # print(payload)
   if ': ' in payload[0]:
-    command, opt = payload[0].split(': ')
+    command, opt = payload[0].split(': ', 1)
   else:
     command = payload[0]
     opt = None
+  print(command)
   if command == 'python' or command == 'javascript' or command == 'code':
     return format_section([ section ], s3_path)
   if command == '':
@@ -79,47 +252,6 @@ def format_applet(section, s3_path):
     applet['fields'] = payload[1:]
   return "<section class='applet_container'><div class='applet' data-payload='{}'></div></section>".format(json.dumps(applet))
 
-def parse_markdown(sections, s3_path, skip_h1=False):
-  """
-  parse page into sections, preprocess the markdown to handle our modifications
-  """
-  groups = []
-  current_group = []
-  for section in sections:
-    if skip_h1 and section.startswith('# '):
-      continue
-    elif section.strip().startswith('```'):
-      groups.append(format_section(current_group, s3_path))
-      current_group = []
-      current_group.append(section)
-      if section.strip().endswith('```'):
-        groups.append(format_applet("\n\n".join(current_group), s3_path))
-        current_group = []
-    elif section.strip().endswith('```'):
-      current_group.append(section)
-      groups.append(format_applet("\n\n".join(current_group), s3_path))
-      current_group = []
-    elif section.startswith('+ '):
-      groups.append(format_section(current_group, s3_path))
-      groups.append(format_metadata(section))
-      current_group = []
-    elif '![fullwidth:' in section:
-      groups.append(format_section(current_group, s3_path))
-      groups.append(format_section([section], s3_path, type='fullwidth'))
-      current_group = []
-    elif '![wide:' in section:
-      groups.append(format_section(current_group, s3_path))
-      groups.append(format_section([section], s3_path, type='wide'))
-      current_group = []
-    elif '![' in section:
-      groups.append(format_section(current_group, s3_path))
-      groups.append(format_section([section], s3_path, type='images'))
-      current_group = []
-    else:
-      current_group.append(section)
-  groups.append(format_section(current_group, s3_path))
-  content = "".join(groups)
-  return content
 
 def parse_research_index(research_posts):
   """
@@ -127,6 +259,7 @@ def parse_research_index(research_posts):
   """
   content = "<div class='research_index'>"
   for post in research_posts:
+    print(post)
     s3_path = s3.make_s3_path(cfg.S3_SITE_PATH, post['path'])
     if 'image' in post:
       post_image = s3_path + post['image']
@@ -140,117 +273,3 @@ def parse_research_index(research_posts):
     content += row
   content += '</div>'
   return content
-
-def read_metadata(fn):
-  """
-  Read in read a markdown file and extract the metadata
-  """
-  with open(fn, "r") as file:
-    data = file.read()
-  data = data.replace("\n ", "\n")
-  if "\n" in data:
-    data = data.replace("\r", "")
-  else:
-    data = data.replace("\r", "\n")
-  sections = data.split("\n\n")
-  return parse_metadata(fn, sections)
-
-default_metadata = {
-  'status': 'published',
-  'title': 'Untitled Page',
-  'desc': '',
-  'slug': '',
-  'published': '2018-12-31',
-  'updated': '2018-12-31',
-  'authors': 'Adam Harvey',
-  'sync': 'true',
-  'tagline': '',
-}
-
-def parse_metadata_section(metadata, section):
-  """
-  parse a metadata key: value pair
-  """
-  for line in section.split("\n"):
-    if ': ' not in line:
-      continue
-    key, value = line.split(': ', 1)
-    metadata[key.lower()] = value
-
-def parse_metadata(fn, sections):
-  """
-  parse the metadata headers in a markdown file
-  (everything before the second ---------)
-  also generates appropriate urls for this page :)
-  """
-  found_meta = False
-  metadata = {}
-  valid_sections = []
-  for section in sections:
-    if not found_meta and ': ' in section:
-      found_meta = True
-      parse_metadata_section(metadata, section)
-      continue
-    if '-----' in section:
-      continue
-    if found_meta:
-      valid_sections.append(section)
-
-  if 'title' not in metadata:
-    print('warning: {} has no title'.format(fn))
-  for key in default_metadata:
-    if key not in metadata:
-      metadata[key] = default_metadata[key]
-
-  basedir = os.path.dirname(fn.replace(cfg.DIR_SITE_CONTENT, ''))
-  basename = os.path.basename(fn)
-  if basedir == '/':
-    metadata['path'] = '/'
-    metadata['url'] = '/'
-  elif basename == 'index.md':
-    metadata['path'] = basedir + '/'
-    metadata['url'] = metadata['path']
-  else:
-    metadata['path'] = basedir + '/'
-    metadata['url'] = metadata['path'] + basename.replace('.md', '') + '/'
-
-  if metadata['status'] == 'published|draft|private':
-    metadata['status'] = 'published'
-
-  metadata['sync'] = metadata['sync'] != 'false'
-
-  metadata['author_html'] = '<br>'.join(metadata['authors'].split(','))
-
-  return metadata, valid_sections
-
-def read_research_post_index():
-  """
-  Generate an index of the research (blog) posts
-  """
-  return read_post_index('research')
-
-def read_datasets_index():
-  """
-  Generate an index of the datasets
-  """
-  return read_post_index('datasets')
-
-def read_post_index(basedir):
-  """
-  Generate an index of posts
-  """
-  posts = []
-  for fn in sorted(glob.glob('../site/content/{}/*/index.md'.format(basedir))):
-    metadata, valid_sections = read_metadata(fn)
-    if metadata is None or metadata['status'] == 'private' or metadata['status'] == 'draft':
-      continue
-    posts.append(metadata)
-  if not len(posts):
-    posts.append({
-      'title': 'Placeholder',
-      'slug': 'placeholder',
-      'date': 'Placeholder',
-      'url': '/',
-    })
-  return posts
-
diff --git a/megapixels/commands/site/watch.py b/megapixels/commands/site/watch.py
new file mode 100644
index 00000000..7bd71038
--- /dev/null
+++ b/megapixels/commands/site/watch.py
@@ -0,0 +1,46 @@
+"""
+Watch for changes in the static site and build them
+"""
+
+import click
+import time
+from watchdog.observers import Observer
+from watchdog.events import PatternMatchingEventHandler
+
+import app.settings.app_cfg as cfg
+from app.site.builder import build_site, build_file
+
+class SiteBuilder(PatternMatchingEventHandler):
+  """
+  Handler for filesystem changes to the content path
+  """
+  patterns = ["*.md"]
+
+  def on_modified(self, event):
+    print(event.src_path, event.event_type)
+    build_file(event.src_path)
+
+  def on_created(self, event):
+    print(event.src_path, event.event_type)
+    build_file(event.src_path)
+
+@click.command()
+@click.pass_context
+def cli(ctx):
+  """
+  Run the observer and start watching for changes
+  """
+  print("{} is now being watched for changes.".format(cfg.DIR_SITE_CONTENT))
+  observer = Observer()
+  observer.schedule(SiteBuilder(), path=cfg.DIR_SITE_CONTENT, recursive=True)
+  observer.start()
+
+  build_file(cfg.DIR_SITE_CONTENT + "/datasets/lfw/index.md")
+
+  try:
+    while True:
+      time.sleep(1)
+  except KeyboardInterrupt:
+    observer.stop()
+
+  observer.join()
diff --git a/site/assets/css/applets.css b/site/assets/css/applets.css
index aa9ce47f..e84fcfc2 100644
--- a/site/assets/css/applets.css
+++ b/site/assets/css/applets.css
@@ -140,9 +140,34 @@
 .map {
   margin-bottom: 20px;
 }
+.map_cover {
+  position: absolute;
+  top: 0;
+  left: 0;
+  width: 100%;
+  height: 100%;
+  cursor: pointer;
+  background: rgba(0,0,0,0.8);
+  z-index: 9998; /* site header is 9999 */
+  display: flex;
+  justify-content: center;
+  align-items: center;
+  font-size: 36px;
+  transition: opacity 0.4s cubic-bezier(0,0,1,1);
+  opacity: 1;
+}
+.desktop .map_cover {
+  opacity: 0;
+}
+.desktop .map_cover:hover {
+  opacity: 1;
+}
 
 /* tabulator */
 
+.tabulator {
+  font-family: 'Roboto', sans-serif;
+}
 .tabulator-row {
   transition: background-color 100ms cubic-bezier(0,0,1,1);
   background-color: rgba(255,255,255,0.0);
diff --git a/site/assets/css/css.css b/site/assets/css/css.css
index 7544fd9d..ab3bb4a7 100644
--- a/site/assets/css/css.css
+++ b/site/assets/css/css.css
@@ -1,22 +1,23 @@
-* { box-sizing: border-box; outline: 0; }
+da* { box-sizing: border-box; outline: 0; }
 html, body {
   margin: 0;
   padding: 0;
   width: 100%;
   min-height: 100%;
-  font-family: 'Roboto', sans-serif;
-  color: #b8b8b8;
+  font-family: 'Roboto Mono', sans-serif;
+  color: #eee;
   overflow-x: hidden;
 }
 html {
-  background: #191919;
+  background: #181818;
 }
 .content {
   opacity: 0;
   transition: opacity 0.2s cubic-bezier(0,1,1,1);
 }
-html.desktop .content, html.mobile .content {
+html.desktop .content,
+html.mobile .content {
   opacity: 1;
 }
 
@@ -28,7 +29,7 @@ header {
   left: 0;
   width: 100%;
   height: 70px;
-  z-index: 2;
+  z-index: 9999;
   background: #1e1e1e;
   display: flex;
   flex-direction: row;
@@ -53,8 +54,10 @@ header .logo {
   height: 30px;
 }
 header .site_name {
+  font-family: 'Roboto', sans-serif;
   font-weight: bold;
   color: #fff;
+  font-size: 14px;
 }
 header .sub {
   margin-left: 4px;
@@ -109,12 +112,19 @@ footer {
   justify-content: space-between;
   color: #888;
   font-size: 9pt;
-  padding: 20px 75px 20px;
+  padding: 20px 0 20px;
+  font-family: "Roboto", sans-serif;
 }
 footer > div {
   display: flex;
   flex-direction: row;
 }
+footer > div:nth-child(1) {
+  padding-left: 75px;
+}
+footer > div:nth-child(2) {
+  padding-right: 75px;
+}
 footer a {
   display: inline-block;
   color: #888;
@@ -145,8 +155,8 @@ h2 {
 }
 h3 {
   margin: 0 0 20px 0;
-  padding: 0;
-  font-size: 11pt;
+  padding: 20px 0 0 0;
+  font-size: 14pt;
   font-weight: 500;
   transition: color 0.2s cubic-bezier(0,0,1,1);
 }
@@ -165,8 +175,17 @@ h4 {
   color: #fff;
   text-decoration: underline;
 }
+.right-sidebar h3 {
+  margin: 0;
+  padding: 0 0 10px 0;
+  font-family: 'Roboto Mono';
+  font-weight: 400;
+  font-size: 14px;
+  text-transform: uppercase;
+  letter-spacing: 2px;
+}
 
-th, .gray, h3, h4 {
+th, .gray {
   font-family: 'Roboto Mono', monospace;
   font-weight: 400;
   text-transform: uppercase;
@@ -185,7 +204,7 @@ th, .gray {
   line-height: 1.5;
 }
 section {
-  width: 640px;
+  width: 960px;
   margin: 0 auto;
 }
 .home section {
@@ -201,13 +220,18 @@ section {
 }
 p {
   margin: 0 0 20px 0;
+  line-height: 2;
+  font-size: 15px;
+  font-weight: 400;
 }
 .content a {
-  color: #ddd;
+  color: #fff;
+  text-decoration: none;
+  border-bottom: 1px dashed;
   transition: color 0.2s cubic-bezier(0,0,1,1);
 }
-.content a:hover {
-  color: #fff;
+.desktop .content a:hover {
+  color: #ff8;
 }
 
 /* top of post metadata */
@@ -219,6 +243,7 @@ p {
   align-items: flex-start;
   font-size: 10pt;
   margin-bottom: 20px;
+  font-family: 'Roboto', sans-serif;
 }
 .meta > div {
   margin-right: 30px;
@@ -229,10 +254,13 @@ p {
 }
 .right-sidebar {
   float: right;
-  width: 200px;
+  width: 240px;
   margin-left: 20px;
+  padding-top: 10px;
   padding-left: 20px;
   border-left: 1px solid #444;
+  font-family: 'Roboto';
+  font-size: 14px;
 }
 .right-sidebar .meta {
   flex-direction: column;
@@ -240,6 +268,14 @@ p {
 .right-sidebar .meta > div {
   margin-bottom: 10px;
 }
+.right-sidebar ul {
+  margin-bottom: 10px;
+  color: #aaa;
+}
+.right-sidebar ul:first-child a {
+  text-decoration: none;
+  border-bottom: 1px solid;
+}
 
 /* lists */
 
@@ -251,6 +287,7 @@ ul {
 ul li {
   margin-bottom: 8px;
 }
+
 /* misc formatting */
 
 code {
@@ -267,7 +304,7 @@ pre {
 pre code {
   display: block;
   max-height: 400px;
-  max-width: 640px;
+  max-width: 960px;
   overflow: scroll;
   padding: 4px 10px;
 }
@@ -318,6 +355,7 @@ section.images {
   flex-direction: row;
   align-items: flex-start;
   justify-content: center;
+  clear: both;
 }
 .image:only-child {
   width: 100%;
@@ -350,11 +388,24 @@ section.fullwidth .image {
   max-width: 100%;
 }
 .caption {
-  text-align: center;
+  text-align: left;
   font-size: 9pt;
-  color: #888;
-  max-width: 620px;
+  color: #999;
+  max-width: 960px;
   margin: 10px auto 0 auto;
+  font-family: 'Roboto';
+}
+.sideimage {
+  margin: 10px 0;
+  display: flex;
+  flex-direction: row;
+  justify-content: flex-start;
+  align-items: flex-start;
+}
+.sideimage img {
+  margin-right: 10px;
+  width: 200px;
+  height: 200px;
 }
 
 /* blog index */
@@ -416,7 +467,7 @@ section.fullwidth .image {
   font-size: 26px;
 }
 .intro {
-  max-width: 640px;
+  max-width: 960px;
   padding: 75px 0 75px 10px;
   z-index: 1;
 }
@@ -481,7 +532,8 @@ section.fullwidth .image {
   text-decoration: none;
   transition: background-color 0.1s cubic-bezier(0,0,1,1);
   background: black;
-  margin: 0 20px 20px 0;
+  margin: 0 11px 11px 0;
+  border: 0;
 }
 .dataset-list .dataset {
   width: 220px;
@@ -496,4 +548,100 @@ section.fullwidth .image {
 .desktop .dataset-list a:nth-child(3n+2):hover { background-color: rgba(255, 128, 0, 0.2); }
 .dataset-list a:nth-child(3n+3) { background-color: rgba(255, 255, 0, 0.1); }
-.desktop .dataset-list .dataset:nth-child(3n+3):hover { background-color: rgba(255, 255, 0, 0.2); }
+.desktop .dataset-list a:nth-child(3n+3):hover { background-color: rgba(255, 255, 0, 0.2); }
+
+.dataset-list span {
+  box-shadow: -3px -3px #181818, 3px -3px #181818, -3px 3px #181818, 3px 3px #181818;
+  background-color: #181818;
+  box-decoration-break: clone;
+}
+
+/* intro section for datasets */
+
+section.intro_section {
+  font-family: 'Roboto Mono';
+  width: 100%;
+  background-size: cover;
+  background-position: bottom left;
+  padding: 50px 0;
+  min-height: 60vh;
+  display: flex;
+  justify-content: center;
+  align-items: center;
+  background-color: #111111;
+}
+.intro_section .inner {
+  max-width: 960px;
+  margin: 0 auto;
+}
+.intro_section .hero_desc {
+  font-size: 38px;
+  line-height: 60px;
+  margin-bottom: 30px;
+  color: #ddd;
+  font-weight: 300;
+}
+.intro_section .hero_subdesc {
+  font-size: 18px;
+  line-height: 36px;
+  max-width: 640px;
+  font-weight: 300;
+  color: #ddd;
+}
+.intro_section div > span {
+  box-shadow: -10px -10px #181818, 10px -10px #181818, 10px 10px #181818, -10px 10px #181818;
+  background: #181818;
+}
+.firefox .intro_section div > span {
+  box-decoration-break: clone;
+}
+
+/* footnotes */
+
+a.footnote {
+  font-size: 10px;
+  position: relative;
+  display: inline-block;
+  bottom: 10px;
+  text-decoration: none;
+  color: #ff8;
+  border: 0;
+  left: 2px;
+  transition-duration: 0s;
+}
+a.footnote_shim {
+  display: inline-block;
+  width: 1px; height: 1px;
+  overflow: hidden;
+  position: relative;
+  top: -90px;
+  visibility: hidden;
+}
+.right-sidebar a.footnote {
+  bottom: 8px;
+}
+.desktop a.footnote:hover {
+  background-color: #ff8;
+  color: #000;
+}
+.backlinks {
+  margin-right: 10px;
+}
+.content .backlinks a {
+  color: #ff8;
+  font-size: 10px;
+  text-decoration: none;
+  border: 0;
+  font-weight: bold;
+  position: relative;
+  bottom: 5px;
+  margin-right: 2px;
+}
+ul.footnotes {
+  list-style-type: decimal;
+  margin-left: 30px;
+}
+li p {
+  margin: 0; padding: 0;
+  display: inline;
+}
\ No newline at end of file
diff --git a/site/assets/css/tabulator.css b/site/assets/css/tabulator.css
index 200f0c5c..63abf050 100755
--- a/site/assets/css/tabulator.css
+++ b/site/assets/css/tabulator.css
@@ -493,7 +493,7 @@
   display: inline-block;
   position: relative;
   box-sizing: border-box;
-  padding: 4px;
+  padding: 10px;
   border-right: 1px solid #333;
   vertical-align: middle;
   white-space: nowrap;
diff --git a/site/content/pages/about/assets/adam-harvey.jpg b/site/content/pages/about/assets/adam-harvey.jpg
new file mode 100644
index 00000000..e0ab893a
--- /dev/null
+++ b/site/content/pages/about/assets/adam-harvey.jpg
Binary files differ
diff --git a/site/content/pages/about/assets/jules-laplace.jpg b/site/content/pages/about/assets/jules-laplace.jpg
new file mode 100644
index 00000000..310b2783
--- /dev/null
+++ b/site/content/pages/about/assets/jules-laplace.jpg
Binary files differ
diff --git a/site/content/pages/about/assets/mozilla.png b/site/content/pages/about/assets/mozilla.png
new file mode 100644
index 00000000..0fd4f115
--- /dev/null
+++ b/site/content/pages/about/assets/mozilla.png
Binary files differ
diff --git a/site/content/pages/about/credits.md b/site/content/pages/about/credits.md
index 2d16155c..3cd0b05b 100644
--- a/site/content/pages/about/credits.md
+++ b/site/content/pages/about/credits.md
@@ -12,6 +12,17 @@ authors: Adam Harvey
 
 # Credits
 
+### Sidebar
+
+- [About](/about/)
+- [Press](/about/press/)
+- [Credits](/about/credits/)
+- [Disclaimer](/about/disclaimer/)
+- [Terms and Conditions](/about/terms/)
+- [Privacy Policy](/about/privacy/)
+
+## End Sidebar
+
 - MegaPixels by Adam Harvey
 - Made with support from Mozilla
 - Site developed by Jules Laplace
diff --git a/site/content/pages/about/disclaimer.md b/site/content/pages/about/disclaimer.md
index 64ce9f21..27cf6760 100644
--- a/site/content/pages/about/disclaimer.md
+++ b/site/content/pages/about/disclaimer.md
@@ -12,6 +12,17 @@ authors: Adam Harvey
 
 # Disclaimer
 
+### Sidebar
+
+- [About](/about/)
+- [Press](/about/press/)
+- [Credits](/about/credits/)
+- [Disclaimer](/about/disclaimer/)
+- [Terms and Conditions](/about/terms/)
+- [Privacy Policy](/about/privacy/)
+
+## End Sidebar
+
 Last updated: December 04, 2018
 
 The information contained on MegaPixels.cc website (the "Service") is for academic and artistic purposes only.
diff --git a/site/content/pages/about/index.md b/site/content/pages/about/index.md
index e2025bf2..59f70d7a 100644
--- a/site/content/pages/about/index.md
+++ b/site/content/pages/about/index.md
@@ -1,8 +1,8 @@
 ------------
 
 status: published
-title: MegaPixels Credits
-desc: MegaPixels Project Team Credits
+title: About MegaPixels
+desc: About MegaPixels
 slug: credits
 published: 2018-12-04
 updated: 2018-12-04
@@ -10,10 +10,30 @@ authors: Adam Harvey
 
 ------------
 
-# Credits
+# About MegaPixels
 
-- MegaPixels by Adam Harvey
-- Made with support from Mozilla
-- Site developed by Jules Laplace
-- Design and graphics: Adam Harvey
-- Research assistants: Berit Gilma
\ No newline at end of file
+### Sidebar
+
+- [Press](/about/press/)
+- [Credits](/about/credits/)
+- [Disclaimer](/about/disclaimer/)
+- [Terms and Conditions](/about/terms/)
+- [Privacy Policy](/about/privacy/)
+
++ Years: 2002-2019
++ Datasets Analyzed: 325
++ Author: Adam Harvey
++ Development: Jules LaPlace
++ Research Assistance: Berit Gilma
+
+## End Sidebar
+
+MegaPixels aims to answer to these questions and reveal the stories behind the millions of images used to train, evaluate, and power the facial recognition surveillance algorithms used today. MegaPixels is authored by Adam Harvey, developed in collaboration with Jules LaPlace, and produced in partnership with Mozilla.
+
+MegaPixels aims to answer to these questions and reveal the stories behind the millions of images used to train, evaluate, and power the facial recognition surveillance algorithms used today. MegaPixels is authored by Adam Harvey, developed in collaboration with Jules LaPlace, and produced in partnership with Mozilla.
+
+ **Adam Harvey** is an American artist and researcher based in Berlin. His previous projects (CV Dazzle, Stealth Wear, and SkyLift) explore the potential for countersurveillance as artwork. He is the founder of VFRAME (visual forensics software for human rights groups), the recipient of 2 PrototypeFund awards, and is currently a researcher in residence at Karlsruhe HfG studying artifical intelligence and datasets.
+
+ **Jules LaPlace** is an American artist and technologist also based in Berlin. He was previously the CTO of a NYC digital agency and currently works at VFRAME, developing computer vision for human rights groups, and building creative software for artists.
+
+ **Mozilla** is a free software community founded in 1998 by members of Netscape. The Mozilla community uses, develops, spreads and supports Mozilla products, thereby promoting exclusively free software and open standards, with only minor exceptions. The community is supported institutionally by the not-for-profit Mozilla Foundation and its tax-paying subsidiary, the Mozilla Corporation.
diff --git a/site/content/pages/about/press.md b/site/content/pages/about/press.md
index 56b4990f..0e3124d0 100644
--- a/site/content/pages/about/press.md
+++ b/site/content/pages/about/press.md
@@ -13,8 +13,20 @@ authors: Adam Harvey
 
 # Press
 
+### Sidebar
+
+- [About](/about/)
+- [Press](/about/press/)
+- [Credits](/about/credits/)
+- [Disclaimer](/about/disclaimer/)
+- [Terms and Conditions](/about/terms/)
+- [Privacy Policy](/about/privacy/)
+
+## End Sidebar
+
 - Aug 22, 2018: "Transgender YouTubers had their videos grabbed to train facial recognition software" by James Vincent <https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset>
 - Aug 22, 2018: "Transgender YouTubers had their videos grabbed to train facial recognition software" by James Vincent <https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset>
 - Aug 22, 2018: "Transgender YouTubers had their videos grabbed to train facial recognition software" by James Vincent <https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset>
+lfw
\ No newline at end of file
diff --git a/site/content/pages/about/privacy.md b/site/content/pages/about/privacy.md
index 17d1b707..9685a189 100644
--- a/site/content/pages/about/privacy.md
+++ b/site/content/pages/about/privacy.md
@@ -12,6 +12,16 @@ authors: Adam Harvey
 
 # Privacy Policy
 
+### Sidebar
+
+- [About](/about/)
+- [Press](/about/press/)
+- [Credits](/about/credits/)
+- [Disclaimer](/about/disclaimer/)
+- [Terms and Conditions](/about/terms/)
+- [Privacy Policy](/about/privacy/)
+
+## End Sidebar
 
 A summary of our privacy policy is as follows:
diff --git a/site/content/pages/about/terms.md b/site/content/pages/about/terms.md
index 3735ff08..6ad03bc1 100644
--- a/site/content/pages/about/terms.md
+++ b/site/content/pages/about/terms.md
@@ -11,8 +11,18 @@ authors: Adam Harvey
 
 ------------
 
-Terms and Conditions ("Terms")
+# Terms and Conditions ("Terms")
 
+### Sidebar
+
+- [About](/about/)
+- [Press](/about/press/)
+- [Credits](/about/credits/)
+- [Disclaimer](/about/disclaimer/)
+- [Terms and Conditions](/about/terms/)
+- [Privacy Policy](/about/privacy/)
+
+## End Sidebar
 
 Last updated: December 04, 2018
diff --git a/site/content/pages/datasets/50_people_one_question/assets/background.gif b/site/content/pages/datasets/50_people_one_question/assets/background.gif
new file mode 100644
index 00000000..a0539bbb
--- /dev/null
+++ b/site/content/pages/datasets/50_people_one_question/assets/background.gif
Binary files differ
diff --git a/site/content/pages/datasets/50_people_one_question/assets/background.jpg b/site/content/pages/datasets/50_people_one_question/assets/background.jpg
new file mode 100644
index 00000000..f7567314
--- /dev/null
+++ b/site/content/pages/datasets/50_people_one_question/assets/background.jpg
Binary files differ
diff --git a/site/content/pages/datasets/50_people_one_question/assets/background_02.jpg b/site/content/pages/datasets/50_people_one_question/assets/background_02.jpg
new file mode 100644
index 00000000..f20ee861
--- /dev/null
+++ b/site/content/pages/datasets/50_people_one_question/assets/background_02.jpg
Binary files differ
diff --git a/site/content/pages/datasets/50_people_one_question/assets/index.jpg b/site/content/pages/datasets/50_people_one_question/assets/index.jpg
new file mode 100644
index 00000000..7bf4ed1d
--- /dev/null
+++ b/site/content/pages/datasets/50_people_one_question/assets/index.jpg
Binary files differ
diff --git a/site/content/pages/datasets/50_people_one_question/assets/index_02.jpg b/site/content/pages/datasets/50_people_one_question/assets/index_02.jpg
new file mode 100644
index 00000000..347bd08d
--- /dev/null
+++ b/site/content/pages/datasets/50_people_one_question/assets/index_02.jpg
Binary files differ
diff --git a/site/content/pages/datasets/brainwash/assets/background.jpg b/site/content/pages/datasets/brainwash/assets/background.jpg
new file mode 100644
index 00000000..eada1779
--- /dev/null
+++ b/site/content/pages/datasets/brainwash/assets/background.jpg
Binary files differ
diff --git a/site/content/pages/datasets/brainwash/assets/index.jpg b/site/content/pages/datasets/brainwash/assets/index.jpg
new file mode 100644
index 00000000..c903baea
--- /dev/null
+++ b/site/content/pages/datasets/brainwash/assets/index.jpg
Binary files differ
diff --git a/site/content/pages/datasets/caltech_10k/index.md b/site/content/pages/datasets/caltech_10k/index.md
new file mode 100644
index 00000000..8f49f2d1
--- /dev/null
+++ b/site/content/pages/datasets/caltech_10k/index.md
@@ -0,0 +1,29 @@
+------------
+
+status: published
+title: Caltech 10K Faces Dataset
+desc: Caltech 10K Faces Dataset
+slug: caltech_10k
+published: 2019-2-23
+updated: 2019-2-23
+authors: Adam Harvey
+
+------------
+
+# Caltech 10K Faces Dataset
+
++ Years: TBD
++ Images: TBD
++ Identities: TBD
++ Origin: Google Search
++ Funding: TBD
+
+-------
+
+Ignore text below these lines
+
+-------
+
+Research
+
+The dataset contains images of people collected from the web by typing common given names into Google Image Search. The coordinates of the eyes, the nose and the center of the mouth for each frontal face are provided in a ground truth file. This information can be used to align and crop the human faces or as a ground truth for a face detection algorithm. The dataset has 10,524 human faces of various resolutions and in different settings, e.g. portrait images, groups of people, etc. Profile faces or very low resolution faces are not labeled.
\ No newline at end of file
diff --git a/site/content/pages/datasets/duke_mtmc/assets/background.jpg b/site/content/pages/datasets/duke_mtmc/assets/background.jpg
new file mode 100644
index 00000000..fb76d97f
--- /dev/null
+++ b/site/content/pages/datasets/duke_mtmc/assets/background.jpg
Binary files differ
diff --git a/site/content/pages/datasets/duke_mtmc/assets/index.jpg b/site/content/pages/datasets/duke_mtmc/assets/index.jpg
new file mode 100644
index 00000000..89b7b08d
--- /dev/null
+++ b/site/content/pages/datasets/duke_mtmc/assets/index.jpg
Binary files differ
diff --git a/site/content/pages/datasets/facebook/index.md b/site/content/pages/datasets/facebook/index.md
new file mode 100644
index 00000000..6e3857fd
--- /dev/null
+++ b/site/content/pages/datasets/facebook/index.md
@@ -0,0 +1,32 @@
+------------
+
+status: published
+title: Facebook
+desc: TBD
+subdesc: TBD
+image: assets/background.jpg
+caption: TBD
+slug: facebook
+published: 2019-2-23
+updated: 2019-2-23
+color: #aaaaff
+authors: Adam Harvey
+
+------------
+
+### Statistics
+
++ Years: 2002-2004
++ Images: 13,233
++ Identities: 5,749
++ Origin: Yahoo News Images
++ Funding: (Possibly, partially CIA)
+
+----
+
+Ignore content below these lines
+
+---
+
+
+- Tool to create face datasets from Facebook <https://github.com/ankitaggarwal011/FaceGrab>
diff --git a/site/content/pages/datasets/helen/assets/background.jpg b/site/content/pages/datasets/helen/assets/background.jpg
new file mode 100644
index 00000000..63ebb8c9
--- /dev/null
+++ b/site/content/pages/datasets/helen/assets/background.jpg
Binary files differ
diff --git a/site/content/pages/datasets/helen/assets/index.jpg b/site/content/pages/datasets/helen/assets/index.jpg
new file mode 100644
index 00000000..9e62679b
--- /dev/null
+++ b/site/content/pages/datasets/helen/assets/index.jpg
Binary files differ
diff --git a/site/content/pages/datasets/hrt_transgender/assets/background.jpg b/site/content/pages/datasets/hrt_transgender/assets/background.jpg
new file mode 100644
index 00000000..9437c4ff
--- /dev/null
+++ b/site/content/pages/datasets/hrt_transgender/assets/background.jpg
Binary files differ
diff --git a/site/content/pages/datasets/hrt_transgender/assets/index.jpg b/site/content/pages/datasets/hrt_transgender/assets/index.jpg
new file mode 100644
index 00000000..428ac42a
--- /dev/null
+++ b/site/content/pages/datasets/hrt_transgender/assets/index.jpg
Binary files differ
diff --git a/site/content/pages/datasets/ilids_vid/assets/background.jpg b/site/content/pages/datasets/ilids_vid/assets/background.jpg
new file mode 100644
index 00000000..6acd30e8
--- /dev/null
+++ b/site/content/pages/datasets/ilids_vid/assets/background.jpg
Binary files differ
diff --git a/site/content/pages/datasets/ilids_vid/assets/index.jpg b/site/content/pages/datasets/ilids_vid/assets/index.jpg
new file mode 100644
index 00000000..66c80d7d
--- /dev/null
+++ b/site/content/pages/datasets/ilids_vid/assets/index.jpg
Binary files differ
diff --git a/site/content/pages/datasets/index.md b/site/content/pages/datasets/index.md
new file mode 100644
index 00000000..fa012758
--- /dev/null
+++ b/site/content/pages/datasets/index.md
@@ -0,0 +1,22 @@
+------------
+
+status: published
+title: MegaPixels: Datasets
+desc: Facial Recognition Datasets
+slug: home
+published: 2018-12-15
+updated: 2018-12-15
+authors: Adam Harvey
+sync: false
+
+------------
+
+# Facial Recognition Datasets
+
++ Found: 275 datasets
++ Created between: 1993-2018
++ Smallest dataset: 20 images
++ Largest dataset: 10,000,000 images
+
++ Highest resolution faces: 450x500 (Unconstrained College Students)
++ Lowest resolution faces: 16x20 pixels (QMUL SurvFace)
diff --git a/site/content/pages/datasets/lfw/assets/background.jpg b/site/content/pages/datasets/lfw/assets/background.jpg
new file mode 100644
index 00000000..3ab1607d
--- /dev/null
+++ b/site/content/pages/datasets/lfw/assets/background.jpg
Binary files differ
diff --git a/site/content/pages/datasets/lfw/assets/index.jpg b/site/content/pages/datasets/lfw/assets/index.jpg
new file mode 100644
index 00000000..bc36c106
--- /dev/null
+++ b/site/content/pages/datasets/lfw/assets/index.jpg
Binary files differ
diff --git a/site/content/pages/datasets/lfw/assets/lfw_commercial_use.csv b/site/content/pages/datasets/lfw/assets/lfw_commercial_use.csv
index 70e2fdeb..a2a4b39c 100644
--- a/site/content/pages/datasets/lfw/assets/lfw_commercial_use.csv
+++ b/site/content/pages/datasets/lfw/assets/lfw_commercial_use.csv
@@ -1,44 +1,44 @@
 "name_display","company_url","example_url","country","description"
-"Aratek","http://www.aratek.co/","","China","Biometric sensors for telecom, civil identification, finance, education, POS, and transportation"
-"Asaphus","https://asaphus.de/","","Germany","Face recognition for home appliances and autonomous vehicles interaction"
-"Aureus","https://cyberextruder.com/biometric-face-recognition-software-use-cases/","","USA","Retail loss prevention solutions, biometric access control, law enforcement and safe city applications, gaming and hospitality applications"
+"Aratek","http://www.aratek.co/"," ","China","Biometric sensors for telecom, civil identification, finance, education, POS, and transportation"
+"Asaphus","https://asaphus.de/"," ","Germany","Face recognition for home appliances and autonomous vehicles interaction"
+"Aureus","https://cyberextruder.com/biometric-face-recognition-software-use-cases/"," ","USA","Retail loss prevention solutions, biometric access control, law enforcement and safe city applications, gaming and hospitality applications"
 "Baidu","http://research.baidu.com/institute-of-deep-learning/","https://www.newscientist.com/article/2113176-chinese-tourist-town-uses-face-recognition-as-an-entry-pass/","China","Retail payment, transportation, civil identification"
-"Betaface","https://www.betaface.com/","","Germany","Web advertising and entertainment, video surveillance, security software, b2b software"
-"Yi+AI","http://www.dress-plus.com/solution","","China","Scenario-based advertising, real-time personalized recommendation, character recognition for ads placement"
-"CM-CV&AR","http://www.cloudminds.com/","","USA","Human augmented robot intelligence"
-"Samtech","http://samtechinfonet.com/products_frs.php","","India","Facilities management, infrastructure support"
-"ColorReco","http://www.colorreco.com/","","China","Face login verification, online payment security verification, access control system identity authentication and face recognition lock, mobile payment, driver fatigue recognition, virtual makeup"
+"Betaface","https://www.betaface.com/"," ","Germany","Web advertising and entertainment, video surveillance, security software, b2b software"
+"Yi+AI","http://www.dress-plus.com/solution"," ","China","Scenario-based advertising, real-time personalized recommendation, character recognition for ads placement"
+"CM-CV&AR","http://www.cloudminds.com/"," ","USA","Human augmented robot intelligence"
+"Samtech","http://samtechinfonet.com/products_frs.php"," ","India","Facilities management, infrastructure support"
+"ColorReco","http://www.colorreco.com/"," ","China","Face login verification, online payment security verification, access control system identity authentication and face recognition lock, mobile payment, driver fatigue recognition, virtual makeup" "CloudWalk","www.cloudwalk.cn/","https://qz.com/africa/1287675/china-is-exporting-facial-recognition-to-africa-ensuring-ai-dominance-through-diversity/","China","Security and law enforcement. Being deployed in Zimbabwe" -"Cylltech","http://www.cylltech.com.cn/","","China","Conference management, social assistance, civil access, media orientation, precision marketing, scenic intelligence, tourism management" +"Cylltech","http://www.cylltech.com.cn/"," ","China","Conference management, social assistance, civil access, media orientation, precision marketing, scenic intelligence, tourism management" "Dahua-FaceImage","https://www.dahuasecurity.com/","https://www.dahuasecurity.com/solutions/solutionsbyapplication/23","China","Public security, public access control, finance" -"Daream","http://www.daream.com","","China","Fatigue and distraction detection for autonomous vehicles" -"Deepmark","https://deepmark.ru/","","Russia","Workplace access control" -"Easen Electron","http://www.easen-electron.com","","China","Face recognition door locks for automobiles" -"Ever AI","https://ever.ai/","","USA","Law enforcement, smart cities, surveillance, building security, retail, payments, autonomous vehicles, grocery stores, enhanced marketing" -"Facebook (Face.com)","https://en.wikipedia.org/wiki/Face.com","","USA","Sold to facebook in 2012, and now incorporated into DeepFace" -"Face++","https://www.faceplusplus.com/","","China","Audience engagement analysis, interactive marketing, gaming, photo album processing, security for mobile payments" -"Faceall","http://www.faceall.cn/index.en.html","","China","Internet banking, insurance, automated surveillance, access control, photo refinement, avatar creation" -"Faceter","https://faceter.io","","USA","Workforce attendence reporting and analytics, home video surveillance, retail customer behavior, GPU mining compatible" -"Facevisa","http://www.facevisa.com","","China","Face detection, face key point positioning, living body certification, facial attribute analysis" -"Fujitsu R&D","https://www.fujitsu.com/cn/en/about/local/subsidiaries/frdc/","","Japan","Consumer cameras" -"SenseTime","https://www.sensetime.com/","","Hong Kong","Surveillance, access control, image retrieval, and automatic log-on for personal computer or mobile devices" +"Daream","http://www.daream.com"," ","China","Fatigue and distraction detection for autonomous vehicles" +"Deepmark","https://deepmark.ru/"," ","Russia","Workplace access control" +"Easen Electron","http://www.easen-electron.com"," ","China","Face recognition door locks for automobiles" +"Ever AI","https://ever.ai/"," ","USA","Law enforcement, smart cities, surveillance, building security, retail, payments, autonomous vehicles, grocery stores, enhanced marketing" +"Facebook (Face.com)","https://en.wikipedia.org/wiki/Face.com"," ","USA","Sold to facebook in 2012, and now incorporated into DeepFace" +"Face++","https://www.faceplusplus.com/"," ","China","Audience engagement analysis, interactive marketing, gaming, photo album processing, security for mobile payments" +"Faceall","http://www.faceall.cn/index.en.html"," ","China","Internet banking, insurance, automated surveillance, access control, photo refinement, avatar creation" +"Faceter","https://faceter.io"," ","USA","Workforce 
attendance reporting and analytics, home video surveillance, retail customer behavior, GPU mining compatible" +"Facevisa","http://www.facevisa.com"," ","China","Face detection, face key point positioning, living body certification, facial attribute analysis" +"Fujitsu R&D","https://www.fujitsu.com/cn/en/about/local/subsidiaries/frdc/"," ","Japan","Consumer cameras" +"SenseTime","https://www.sensetime.com/"," ","Hong Kong","Surveillance, access control, image retrieval, and automatic log-on for personal computer or mobile devices" "Turing Robot","http://www.tuling123.com/","http://biz.turingos.cn/home","China","Emotion recognition and analysis for robots and toys, chatbots and digital assistants" "NEC","https://www.nec.com/en/press/201407/global_20140716_01.html","https://arxiv.org/abs/1212.6094","Japan","Law enforcement, event crowd monitoring, used specifically by Metropolitan police in UK" -"Aurora","http://auroracs.co.uk/","","UK","Face recognition in airports for security, queue management, x-ray divestment tray linkage" +"Aurora","http://auroracs.co.uk/"," ","UK","Face recognition in airports for security, queue management, x-ray divestment tray linkage" "VisionLabs","https://visionlabs.ai/","https://venturebeat.com/2016/07/07/russian-facial-recognition-startup-visionlabs-raises-5-5m-after-partnering-with-facebook-and-google/","Russia","Video surveillance, banking and finance, customer authentication for retail" -"Yunshitu","http://yunshitu.cn","","China","Security, Internet, broadcasting and other industries" -"Glasssix","http://www.glasssix.com/","","China","School attendance, workforce monitoring" +"Yunshitu","http://yunshitu.cn"," ","China","Security, Internet, broadcasting and other industries" +"Glasssix","http://www.glasssix.com/"," ","China","School attendance, workforce monitoring" "Hisign","http://www.hisign.com.cn/en-us/index.aspx","https://www.bloomberg.com/research/stocks/private/snapshot.asp?privcapId=52323181","China","Criminal investigation information application, and financial big data risk prevention and control products in China" "icarevision","http://www.icarevision.cn","https://www.bloomberg.com/research/stocks/private/snapshot.asp?privcapId=306707800","China","Video surveillance" "IntelliVision","https://www.intelli-vision.com/facial-recognition/","https://www.bloomberg.com/profiles/companies/0080393D:US-intellivision-technologies-corp","USA","Smart homes and buildings, smart security, smart city, smart retail, smart auto" "Meiya Pico","https://meiyapico.com/","https://www.bloomberg.com/research/stocks/private/snapshot.asp?privcapId=117577345","China","Digital forensics and information security products and services in China" "Orion Star","https://www.ainirobot.com/#sixthPage","https://www.prnewswire.com/news-releases/orionstar-wins-challenge-to-recognize-one-million-celebrity-faces-with-artificial-intelligence-300494265.html","China","Face recognition for robots and livestream video censoring" -"Pegatron","http://www.pegatroncorp.com","","China","Workforce attendance" +"Pegatron","http://www.pegatroncorp.com"," ","China","Workforce attendance" "PingAn AI Lab","http://www.pingan.com/","https://www.biometricupdate.com/201703/ping-an-technology-developing-ai-face-recognition-technology-with-record-results","China","Financial services, lending" -"ReadSense","http://www.readsense.ai/","","China","Access control, traffic analysis, crowd analysis, head counting, drone vision, home appliances, community surveillance, custom attention analysis"
-"sensingtech","www.sensingtech.com.cn","","China","Workplace entrypoint authentication" -"TCIT","http://www.tcit-us.com/?p=4023","","Taiwan","Retail analytics, workplace access control" -"TerminAI","terminai.com","","China","Smart office, smart city, smart gym, smart medical, smart community" -"Uni-Ubi","http://uni-ubi.com/","","China","Facial recognition for education, business, community, construction" -"Tencent YouTu Lab","http://bestimage.qq.com/","","China","Consumer applications for automatic facial beauty" -"Yuntu WiseSight","http://www.facelab.cn/","","China","Intrusion alarm, access control, access control, electronic patrol, and network alarm. detect suspicious personnel, real-name authentication, and public security, customs, airports, railways and other government security agencies, electronic patrol"
\ No newline at end of file +"ReadSense","http://www.readsense.ai/"," ","China","Access control, traffic analysis, crowd analysis, head counting, drone vision, home appliances, community surveillance, custom attention analysis" +"sensingtech","www.sensingtech.com.cn"," ","China","Workplace entrypoint authentication" +"TCIT","http://www.tcit-us.com/?p=4023"," ","Taiwan","Retail analytics, workplace access control" +"TerminAI","terminai.com"," ","China","Smart office, smart city, smart gym, smart medical, smart community" +"Uni-Ubi","http://uni-ubi.com/"," ","China","Facial recognition for education, business, community, construction" +"Tencent YouTu Lab","http://bestimage.qq.com/"," ","China","Consumer applications for automatic facial beauty" +"Yuntu WiseSight","http://www.facelab.cn/"," ","China","Intrusion alarm, access control, access control, electronic patrol, and network alarm. detect suspicious personnel, real-name authentication, and public security, customs, airports, railways and other government security agencies, electronic patrol"
\ No newline at end of file diff --git a/site/content/pages/datasets/lfw/assets/lfw_index.gif b/site/content/pages/datasets/lfw/assets/lfw_index.gif Binary files differdeleted file mode 100644 index 63a5c423..00000000 --- a/site/content/pages/datasets/lfw/assets/lfw_index.gif +++ /dev/null diff --git a/site/content/pages/datasets/lfw/assets/lfw_montage.jpg b/site/content/pages/datasets/lfw/assets/lfw_montage.jpg Binary files differdeleted file mode 100644 index 8d73c417..00000000 --- a/site/content/pages/datasets/lfw/assets/lfw_montage.jpg +++ /dev/null diff --git a/site/content/pages/datasets/lfw/assets/lfw_synthetic.jpg b/site/content/pages/datasets/lfw/assets/lfw_synthetic.jpg Binary files differdeleted file mode 100644 index c2a34043..00000000 --- a/site/content/pages/datasets/lfw/assets/lfw_synthetic.jpg +++ /dev/null diff --git a/site/content/pages/datasets/lfw/index.md b/site/content/pages/datasets/lfw/index.md index 1f847a2a..4161561d 100644 --- a/site/content/pages/datasets/lfw/index.md +++ b/site/content/pages/datasets/lfw/index.md @@ -2,60 +2,64 @@ status: published title: Labeled Faces in The Wild -desc: LFW: Labeled Faces in The Wild +desc: Labeled Faces in The Wild (LFW) is a database of face photographs designed for studying the problem of unconstrained face recognition. +subdesc: It includes 13,233 images of 5,749 people copied from the Internet during 2002-2004. +image: assets/background.jpg +caption: A few of the 5,749 people in the Labeled Faces in the Wild dataset, the most widely used face dataset for benchmarking commercial face recognition algorithms. slug: lfw published: 2019-2-23 updated: 2019-2-23 +color: #ff0000 authors: Adam Harvey ------------ -# LFW +### Statistics + Years: 2002-2004 + Images: 13,233 + Identities: 5,749 + Origin: Yahoo News Images -+ Funding: (Possibly, partially CIA*) ++ Funding: (Possibly, partially CIA) - - -*Labeled Faces in The Wild* (LFW) is "a database of face photographs designed for studying the problem of unconstrained face recognition[^lfw_www]. It is used to evaluate and improve the performance of facial recognition algorithms in academic, commercial, and government research. According to BiometricUpdate.com[^lfw_pingan], LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." - -The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. LFW is a subset of *Names of Faces* and is part of the first facial recognition training dataset created entirely from images appearing on the Internet. The people appearing in LFW are... - -The *Names and Faces* dataset was the first face recognition dataset created entire from online photos. However, *Names and Faces* and *LFW* are not the first face recognition dataset created entirely "in the wild". That title belongs to the [UCD dataset](/datasets/ucd_faces/). Images obtained "in the wild" means using an image without explicit consent or awareness from the subject or photographer. - - -### Analysis +### Insights - There are about 3 men for every 1 woman (4,277 men and 1,472 women) in the LFW dataset[^lfw_www] - The person with the most images is [George W. Bush](http://vis-www.cs.umass.edu/lfw/person/George_W_Bush_comp.html) with 530 - There are about 3 George W. 
Bushes for every 1 [Tony Blair](http://vis-www.cs.umass.edu/lfw/person/Tony_Blair.html) -- 70% of people in the dataset have only 1 image and 29% have 2 or more images - The LFW dataset includes over 500 actors, 30 models, 10 presidents, 124 basketball players, 24 football players, 11 kings, 7 queens, and 1 [Moby](http://vis-www.cs.umass.edu/lfw/person/Moby.html) - In all 3 of the LFW publications [^lfw_original_paper], [^lfw_survey], [^lfw_tech_report] the words "ethics", "consent", and "privacy" appear 0 times - The word "future" appears 71 times -### Synthetic Faces +## Labeled Faces in the Wild -To visualize the types of photos in the dataset without explicitly publishing individual's identities a generative adversarial network (GAN) was trained on the entire dataset. The images in this video show a neural network learning the visual latent space and then interpolating between archetypical identities within the LFW dataset. +*Labeled Faces in The Wild* (LFW) is "a database of face photographs designed for studying the problem of unconstrained face recognition"[^lfw_www]. It is used to evaluate and improve the performance of facial recognition algorithms in academic, commercial, and government research. According to BiometricUpdate.com[^lfw_pingan], LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." - +The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002 and 2004. LFW is a subset of *Names and Faces* and is part of the first facial recognition training dataset created entirely from images appearing on the Internet. The people appearing in LFW are... +The *Names and Faces* dataset was the first face recognition dataset created entirely from online photos. However, *Names and Faces* and *LFW* are not the first face recognition datasets created entirely "in the wild". That title belongs to the [UCD dataset](/datasets/ucd_faces/). "In the wild" means the images were obtained without the explicit consent or awareness of the subject or photographer. ### Biometric Trade Routes -To understand how this dataset has been used, its citations have been geocoded to show an approximate geographic digital trade route of the biometric data. Lines indicate an organization (education, commercial, or governmental) that has cited the LFW dataset in their research. Data is compiled from [SemanticScholar](https://www.semanticscholar.org). +To understand how this dataset has been used, its citations have been geocoded to show an approximate geographic digital trade route of the biometric data. Lines indicate an organization (education, commercial, or governmental) that has cited the LFW dataset in its research. Data is compiled from [Semantic Scholar](https://www.semanticscholar.org). -[add map here] +``` +map +``` + +### Synthetic Faces + +To visualize the types of photos in the dataset without explicitly publishing individuals' identities, a generative adversarial network (GAN) was trained on the entire dataset. The images in this video show a neural network learning the visual latent space and then interpolating between archetypical identities within the LFW dataset. + + ### Citations Browse or download the geocoded citation data collected for the LFW dataset. 
-[add citations table here] - +``` +citations +``` ### Additional Information @@ -67,27 +71,14 @@ Browse or download the geocoded citation data collected for the LFW dataset. - The faces in the LFW dataset were detected using the Viola-Jones Haar cascade face detector [^lfw_website] [^lfw-survey] - The LFW dataset is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." [^lfw_pingan] - All images in the LFW dataset were copied from Yahoo News between 2002 - 2004 -<<<<<<< HEAD -- In 2014, two of the four original authors of the LFW dataset received funding from IARPA and ODNI for their follow up paper [Labeled Faces in the Wild: Updates and New Reporting Procedures](https://www.semanticscholar.org/paper/Labeled-Faces-in-the-Wild-%3A-Updates-and-New-Huang-Learned-Miller/2d3482dcff69c7417c7b933f22de606a0e8e42d4) via IARPA contract number 2014-14071600010 +- In 2014, two of the four original authors of the LFW dataset received funding from IARPA and ODNI for their follow-up paper [Labeled Faces in the Wild: Updates and New Reporting Procedures](https://www.semanticscholar.org/paper/Labeled-Faces-in-the-Wild-%3A-Updates-and-New-Huang-Learned-Miller/2d3482dcff69c7417c7b933f22de606a0e8e42d4) via IARPA contract number 2014-14071600010 - The dataset includes 2 images of [George Tenet](http://vis-www.cs.umass.edu/lfw/person/George_Tenet.html), the former Director of Central Intelligence (DCI) for the Central Intelligence Agency whose facial biometrics were eventually used to help train facial recognition software in China and Russia -======= -- In 2014, 2/4 of the original authors of the LFW dataset received funding from IARPA and ODNI for their follow up paper "Labeled Faces in the Wild: Updates and New Reporting Procedures" via IARPA contract number 2014-14071600010 -- The LFW dataset was used Center for Intelligent Information Retrieval, the Central Intelligence Agency, the National Security Agency and National - -TODO (need citations for the following) - -- SenseTime, who has relied on LFW for benchmarking their facial recognition performance, is one the leading provider of surveillance to the Chinese Government [need citation for this fact. is it the most? or is that Tencent?] - -Two out of 4 of the original authors received funding from the Office of Director of National Intelligence and IARPA for their 2016 LFW survey follow up report - ->>>>>>> 13d7a450affe8ea4f368a97ea2014faa17702a4c - - ## Code The LFW dataset is so widely used that the popular code library scikit-learn includes a function called `fetch_lfw_people` to download the faces in the LFW dataset (a short usage sketch follows below). @@ -131,7 +122,6 @@ imageio.imwrite('lfw_montage_960.jpg', montage) ### Supplementary Material - ``` load_file assets/lfw_commercial_use.csv name_display, company_url, example_url, country, description @@ -139,14 +129,14 @@ name_display, company_url, example_url, country, description Text and graphics ©Adam Harvey / megapixels.cc - ------- Ignore text below these lines ------- -Research + +### Research - "In our experiments, we used 10000 images and associated captions from the Faces in the wild data set [3]." - "This work was supported in part by the Center for Intelligent Information Retrieval, the Central Intelligence Agency, the National Security Agency and National Science Foundation under CAREER award IIS-0546666 and grant IIS-0326249." 
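For reference, the `fetch_lfw_people` function mentioned in the Code section above ships with scikit-learn in the `sklearn.datasets` module. Below is a minimal sketch of using it; the parameter values are illustrative choices, not the ones used to build this site:

```
from sklearn.datasets import fetch_lfw_people

# Downloads LFW on first use (cached under ~/scikit_learn_data by default)
# and keeps only the identities with at least 70 images each
lfw = fetch_lfw_people(min_faces_per_person=70, resize=0.4)

print(lfw.images.shape)   # (n_samples, height, width) grayscale face crops
print(lfw.target_names)   # names of the identities that met the threshold
```

Each entry of `lfw.target` indexes into `lfw.target_names`, so every face is one array lookup away from a full name, which is what makes the dataset convenient for benchmarking and problematic for the people in it.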
@@ -156,7 +146,11 @@ Research - This research is based upon work supported in part by the Office of the Director of National Intelligence (ODNI), Intelligence Advanced Research Projects Activity (IARPA), via contract number 2014-14071600010. - From "Labeled Faces in the Wild: Updates and New Reporting Procedures" +- 70% of people in the dataset have only 1 image and 29% have 2 or more images + +### Footnotes [^lfw_www]: <http://vis-www.cs.umass.edu/lfw/results.html> [^lfw_baidu]: Jingtuo Liu, Yafeng Deng, Tao Bai, Zhengping Wei, Chang Huang. Targeting Ultimate Accuracy: Face Recognition via Deep Embedding. <https://arxiv.org/abs/1506.07310> [^lfw_pingan]: Lee, Justin. "PING AN Tech facial recognition receives high score in latest LFW test results". BiometricUpdate.com. Feb 13, 2017. <https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results> + diff --git a/site/content/pages/datasets/mars/assets/background.jpg b/site/content/pages/datasets/mars/assets/background.jpg Binary files differnew file mode 100644 index 00000000..f3440590 --- /dev/null +++ b/site/content/pages/datasets/mars/assets/background.jpg diff --git a/site/content/pages/datasets/mars/assets/index.jpg b/site/content/pages/datasets/mars/assets/index.jpg Binary files differnew file mode 100644 index 00000000..e866defd --- /dev/null +++ b/site/content/pages/datasets/mars/assets/index.jpg diff --git a/site/content/pages/datasets/pubfig/assets/background.jpg b/site/content/pages/datasets/pubfig/assets/background.jpg Binary files differnew file mode 100644 index 00000000..db748a8f --- /dev/null +++ b/site/content/pages/datasets/pubfig/assets/background.jpg diff --git a/site/content/pages/datasets/pubfig/assets/index.jpg b/site/content/pages/datasets/pubfig/assets/index.jpg Binary files differnew file mode 100644 index 00000000..2470b35c --- /dev/null +++ b/site/content/pages/datasets/pubfig/assets/index.jpg diff --git a/site/content/pages/datasets/uccs/assets/background.jpg b/site/content/pages/datasets/uccs/assets/background.jpg Binary files differnew file mode 100644 index 00000000..db6b0180 --- /dev/null +++ b/site/content/pages/datasets/uccs/assets/background.jpg diff --git a/site/content/pages/datasets/uccs/assets/index.jpg b/site/content/pages/datasets/uccs/assets/index.jpg Binary files differnew file mode 100644 index 00000000..9809e564 --- /dev/null +++ b/site/content/pages/datasets/uccs/assets/index.jpg diff --git a/site/content/pages/datasets/uccs/index.md b/site/content/pages/datasets/uccs/index.md index d40dce22..be1d2474 100644 --- a/site/content/pages/datasets/uccs/index.md +++ b/site/content/pages/datasets/uccs/index.md @@ -68,7 +68,7 @@ The more recent UCCS version of the dataset received funding from [^funding_uccs - You are welcome to use these images for academic and journalistic use including research papers, news stories, and presentations. - Please use the following citation: -```MegaPixels.cc Adam Harvey 2013-2109.``` +```MegaPixels.cc Adam Harvey 2013-2019.``` [^funding_sb]: Sapkota, Archana and Boult, Terrance. "Large Scale Unconstrained Open Set Face Database." 2013. [^funding_uccs]: Günther, M. et al. "Unconstrained Face Detection and Open-Set Face Recognition Challenge," 2018. arXiv:1708.02337v3.
\ No newline at end of file diff --git a/site/content/pages/datasets/ucf_selfie/assets/background.jpg b/site/content/pages/datasets/ucf_selfie/assets/background.jpg Binary files differnew file mode 100644 index 00000000..877133e5 --- /dev/null +++ b/site/content/pages/datasets/ucf_selfie/assets/background.jpg diff --git a/site/content/pages/datasets/ucf_selfie/assets/background_02.jpg b/site/content/pages/datasets/ucf_selfie/assets/background_02.jpg Binary files differnew file mode 100644 index 00000000..1e092f61 --- /dev/null +++ b/site/content/pages/datasets/ucf_selfie/assets/background_02.jpg diff --git a/site/content/pages/datasets/ucf_selfie/assets/index.jpg b/site/content/pages/datasets/ucf_selfie/assets/index.jpg Binary files differnew file mode 100644 index 00000000..4837f6e3 --- /dev/null +++ b/site/content/pages/datasets/ucf_selfie/assets/index.jpg diff --git a/site/content/pages/datasets/ucf_selfie/assets/index_02.jpg b/site/content/pages/datasets/ucf_selfie/assets/index_02.jpg Binary files differnew file mode 100644 index 00000000..524340d7 --- /dev/null +++ b/site/content/pages/datasets/ucf_selfie/assets/index_02.jpg diff --git a/site/content/pages/datasets/viper/assets/background.jpg b/site/content/pages/datasets/viper/assets/background.jpg Binary files differnew file mode 100644 index 00000000..db0b2857 --- /dev/null +++ b/site/content/pages/datasets/viper/assets/background.jpg diff --git a/site/content/pages/datasets/ytmu/assets/index.jpg b/site/content/pages/datasets/ytmu/assets/index.jpg Binary files differnew file mode 100644 index 00000000..76716847 --- /dev/null +++ b/site/content/pages/datasets/ytmu/assets/index.jpg diff --git a/site/content/pages/datasets/ytmu/assets/index_02.jpg b/site/content/pages/datasets/ytmu/assets/index_02.jpg Binary files differnew file mode 100644 index 00000000..30c863f6 --- /dev/null +++ b/site/content/pages/datasets/ytmu/assets/index_02.jpg diff --git a/site/content/pages/datasets/ytmu/assets/index_03.jpg b/site/content/pages/datasets/ytmu/assets/index_03.jpg Binary files differnew file mode 100644 index 00000000..20ccae90 --- /dev/null +++ b/site/content/pages/datasets/ytmu/assets/index_03.jpg diff --git a/site/content/pages/datasets/ytmu/assets/index_04.jpg b/site/content/pages/datasets/ytmu/assets/index_04.jpg Binary files differnew file mode 100644 index 00000000..3d67baac --- /dev/null +++ b/site/content/pages/datasets/ytmu/assets/index_04.jpg diff --git a/site/content/pages/index.md b/site/content/pages/index.md index d63cf9fa..1cf47aac 100644 --- a/site/content/pages/index.md +++ b/site/content/pages/index.md @@ -1,30 +1,14 @@ ------------ status: published -title: MegaPixels -desc: -slug: home +title: Megapixels +desc: The Darkside of Datasets +slug: analysis published: 2018-12-15 updated: 2018-12-15 authors: Adam Harvey sync: false - ------------- - -## Facial Recognition Datasets - -Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. 
-### Summary - -+ Found: 275 datasets -+ Created between: 1993-2018 -+ Smallest dataset: 20 images -+ Largest dataset: 10,000,000 images - -+ Highest resolution faces: 450x500 (Unconstrained College Students) -+ Lowest resolution faces: 16x20 pixels (QMUL SurvFace) +------------ -``` -load_file https://megapixels.nyc3.digitaloceanspaces.com/v1/citations/datasets.csv -``` +## diff --git a/site/content/pages/info/index.md b/site/content/pages/info/index.md index 4a65e71a..090783d9 100644 --- a/site/content/pages/info/index.md +++ b/site/content/pages/info/index.md @@ -11,7 +11,7 @@ sync: false ------------ -## What do facial recognition algorithms see? +## Face Analysis ``` face_analysis diff --git a/site/content/pages/research/00_introduction/index.md b/site/content/pages/research/00_introduction/index.md index d3ef506b..6fec7ab5 100644 --- a/site/content/pages/research/00_introduction/index.md +++ b/site/content/pages/research/00_introduction/index.md @@ -15,6 +15,19 @@ authors: Megapixels + Posted: Dec. 15 + Author: Adam Harvey + +Ever since the first computational facial recognition research project by the CIA in the early 1960s, data has always played a vital role in the development of our biometric future. Without facial recognition datasets there would be no facial recognition. Datasets are an indispensable part of any artificial intelligence system because, as Geoffrey Hinton points out: +> Our relationship to computers has changed. Instead of programming them, we now show them and they figure it out. - [Geoffrey Hinton](https://www.youtube.com/watch?v=-eyhCTvrEtE) + +Algorithms learn from datasets. And we program algorithms by building datasets. But datasets aren't like code. There's no programming language made of data except for the data itself. + +----- + +Ignore content below these lines + +----- +
+ It was the early 2000s. Face recognition was new and no one seemed sure exactly how well it was going to perform in practice. In theory, face recognition was poised to be a game changer, a force multiplier, a strategic military advantage, a way to make cities safer and to secure borders. This was the future John Ashcroft demanded with the Total Information Awareness act of 2003 and that spooks had dreamed of for decades. It was a future that academics at Carnegie Mellon University and Colorado State University would help build. It was also a future that celebrities would play a significant role in building. And to the surprise of ordinary Internet users like myself and perhaps you, it was a future that millions of Internet users would unwittingly play a role in creating. Now the future has arrived and it doesn't make sense. Facial recognition works yet it doesn't actually work. Facial recognition is cheap and accessible but also expensive and out of control. Facial recognition research has achieved headline-grabbing superhuman accuracies over 99.9% yet facial recognition is also dangerously inaccurate. During a trial installation at Südkreuz station in Berlin in 2018, 20% of the matches were wrong, an error rate so high that it should not have any connection to law enforcement or justice. And in London, the Metropolitan police had been using facial recognition software that mistakenly identified an alarming 98% of people as criminals [^met_police], which perhaps is a crime itself. @@ -33,16 +46,6 @@ As McLuhan wrote, "You can't have a static, fixed position in the electric age". Like many projects, MegaPixels had spent years meandering between formats and unfeasible budgets, and was generally too niche a subject. The basic idea for this project, as proposed to the original [Glass Room](https://tacticaltech.org/projects/the-glass-room-nyc/) installation in 2016 in NYC, was to build an interactive mirror that showed people if they had been included in the [LFW](/datasets/lfw) facial recognition dataset. The idea was based on my reaction to all the datasets I'd come across during research for the CV Dazzle project. I'd noticed strange datasets created for training and testing face detection algorithms. Most were created in laboratory settings and their interpretation of face data was very strict. -About the name -About the funding -About me -About the team -Conclusion ### for other post diff --git a/site/content/pages/research/01_from_1_to_100_pixels/index.md b/site/content/pages/research/01_from_1_to_100_pixels/index.md index 3a46bccb..409dcf02 100644 --- a/site/content/pages/research/01_from_1_to_100_pixels/index.md +++ b/site/content/pages/research/01_from_1_to_100_pixels/index.md @@ -52,4 +52,6 @@ Ideas: - NIST report on sres states several resolutions - "Results show that the tested face recognition systems yielded similar performance for query sets with eye-to-eye distance from 60 pixels to 30 pixels" [^nist_sres] -[^nist_sres]: NIST 906932. Performance Assessment of Face Recognition Using Super-Resolution. Shuowen Hu, Robert Maschal, S. Susan Young, Tsai Hong Hong, Jonathon P. Phillips
\ No newline at end of file +[^nist_sres]: NIST 906932. Performance Assessment of Face Recognition Using Super-Resolution. Shuowen Hu, Robert Maschal, S. Susan Young, Tsai Hong Hong, Jonathon P. Phillips + +- "Note that we only keep the images with a minimal side length of 80 pixels." and "a face will be labeled as “Ignore” if it is very difficult to be detected due to blurring, severe deformation and unrecognizable eyes, or the side length of its bounding box is less than 32 pixels." Ge_Detecting_Masked_Faces_CVPR_2017_paper.pdf
\ No newline at end of file diff --git a/site/datasets/final/ijb_c_sample.csv b/site/datasets/final/ijb_c_sample.csv new file mode 100644 index 00000000..15bfccab --- /dev/null +++ b/site/datasets/final/ijb_c_sample.csv @@ -0,0 +1,141 @@ +index,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,IJB-A,ijb_c,0.0,0.0,,,140c95e53c619eac594d70f6369f518adfea12ef,main,http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf,Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A,2015 +1,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,872dfdeccf99bbbed7c8f1ea08afb2d713ebe085,citation,https://arxiv.org/pdf/1703.09507.pdf,L2-constrained Softmax Loss for Discriminative Face Verification,2017 +2,IJB-A,ijb_c,38.8920756,-104.79716389,"University of Colorado, Colorado Springs",edu,146a7ecc7e34b85276dd0275c337eff6ba6ef8c0,citation,https://arxiv.org/pdf/1611.06158v1.pdf,AFFACT: Alignment-free facial attribute classification technique,2017 +3,IJB-A,ijb_c,51.7534538,-1.25400997,University of Oxford,edu,313d5eba97fe064bdc1f00b7587a4b3543ef712a,citation,https://pdfs.semanticscholar.org/cb7f/93467b0ec1afd43d995e511f5d7bf052a5af.pdf,Compact Deep Aggregation for Set Retrieval,2018 +4,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,5865b6d83ba6dbbf9167f1481e9339c2ef1d1f6b,citation,https://doi.org/10.1109/ICPR.2016.7900278,Regularized metric adaptation for unconstrained face verification,2016 +5,IJB-A,ijb_c,37.4102193,-122.05965487,Carnegie Mellon University,edu,48a9241edda07252c1aadca09875fabcfee32871,citation,https://arxiv.org/pdf/1611.08657v5.pdf,Convolutional Experts Constrained Local Model for Facial Landmark Detection,2017 +6,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,86204fc037936754813b91898377e8831396551a,citation,https://arxiv.org/pdf/1709.01442.pdf,Dense Face Alignment,2017 +7,IJB-A,ijb_c,22.57423855,88.4337303,"Institute of Engineering and Management, Kolkata, India",edu,b2cb335ded99b10f37002d09753bd5a6ea522ef1,citation,https://doi.org/10.1109/ISBA.2017.7947679,Analysis of adaptability of deep features for verifying blurred and cross-resolution images,2017 +8,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,b2cb335ded99b10f37002d09753bd5a6ea522ef1,citation,https://doi.org/10.1109/ISBA.2017.7947679,Analysis of adaptability of deep features for verifying blurred and cross-resolution images,2017 +9,IJB-A,ijb_c,45.7835966,4.7678948,École Centrale de Lyon,edu,486840f4f524e97f692a7f6b42cd19019ee71533,citation,https://arxiv.org/pdf/1703.08388v2.pdf,DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills,2017 +10,IJB-A,ijb_c,48.832493,2.267474,Safran Identity and Security,company,486840f4f524e97f692a7f6b42cd19019ee71533,citation,https://arxiv.org/pdf/1703.08388v2.pdf,DeepVisage: Making Face Recognition Simple Yet With Powerful Generalization Skills,2017 +11,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,2d748f8ee023a5b1fbd50294d176981ded4ad4ee,citation,http://pdfs.semanticscholar.org/2d74/8f8ee023a5b1fbd50294d176981ded4ad4ee.pdf,Triplet Similarity Embedding for Face Verification,2016 +12,IJB-A,ijb_c,38.99203005,-76.9461029,University of Maryland College Park,edu,f7824758800a7b1a386db5bd35f84c81454d017a,citation,https://arxiv.org/pdf/1702.05085.pdf,KEPLER: Keypoint and Pose Estimation of Unconstrained Faces by Learning Efficient H-CNN Regressors,2017 +13,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State 
University,edu,02467703b6e087799e04e321bea3a4c354c5487d,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.27,Grouper: Optimizing Crowdsourced Face Annotations,2016 +14,IJB-A,ijb_c,39.329053,-76.619425,Johns Hopkins University,edu,377f2b65e6a9300448bdccf678cde59449ecd337,citation,https://arxiv.org/pdf/1804.10275.pdf,Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results,2018 +15,IJB-A,ijb_c,40.47913175,-74.43168868,Rutgers University,edu,377f2b65e6a9300448bdccf678cde59449ecd337,citation,https://arxiv.org/pdf/1804.10275.pdf,Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results,2018 +16,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,cd55fb30737625e86454a2861302b96833ed549d,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094,Annotating Unconstrained Face Imagery: A scalable approach,2015 +17,IJB-A,ijb_c,38.95187,-77.363259,"Noblis, Falls Church, VA, U.S.A.",company,cd55fb30737625e86454a2861302b96833ed549d,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139094,Annotating Unconstrained Face Imagery: A scalable approach,2015 +18,IJB-A,ijb_c,46.0501558,14.46907327,University of Ljubljana,edu,5226296884b3e151ce317a37f94827dbda0b9d16,citation,https://doi.org/10.1109/IWBF.2016.7449690,Deep pair-wise similarity learning for face recognition,2016 +19,IJB-A,ijb_c,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,80be8624771104ff4838dcba9629bacfe6b3ea09,citation,http://www.ifp.illinois.edu/~moulin/Papers/ECCV14-jiwen.pdf,Simultaneous Feature and Dictionary Learning for Image Set Based Face Recognition,2014 +20,IJB-A,ijb_c,1.3484104,103.68297965,Nanyang Technological University,edu,80be8624771104ff4838dcba9629bacfe6b3ea09,citation,http://www.ifp.illinois.edu/~moulin/Papers/ECCV14-jiwen.pdf,Simultaneous Feature and Dictionary Learning for Image Set Based Face Recognition,2014 +21,IJB-A,ijb_c,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,80be8624771104ff4838dcba9629bacfe6b3ea09,citation,http://www.ifp.illinois.edu/~moulin/Papers/ECCV14-jiwen.pdf,Simultaneous Feature and Dictionary Learning for Image Set Based Face Recognition,2014 +22,IJB-A,ijb_c,22.304572,114.17976285,Hong Kong Polytechnic University,edu,50b58becaf67e92a6d9633e0eea7d352157377c3,citation,https://pdfs.semanticscholar.org/50b5/8becaf67e92a6d9633e0eea7d352157377c3.pdf,Dependency-Aware Attention Control for Unconstrained Face Recognition with Image Sets,2018 +23,IJB-A,ijb_c,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,cd6aaa37fffd0b5c2320f386be322b8adaa1cc68,citation,https://arxiv.org/pdf/1804.06655.pdf,Deep Face Recognition: A Survey,2018 +24,IJB-A,ijb_c,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,ac2881bdf7b57dc1672a17b221d68a438d79fce8,citation,https://arxiv.org/pdf/1806.08472.pdf,Learning a High Fidelity Pose Invariant Model for High-resolution Face Frontalization,2018 +25,IJB-A,ijb_c,40.0044795,116.370238,Chinese Academy of Sciences,edu,72a7eb68f0955564e1ceafa75aeeb6b5bbb14e7e,citation,https://pdfs.semanticscholar.org/72a7/eb68f0955564e1ceafa75aeeb6b5bbb14e7e.pdf,Face Recognition with Contrastive Convolution,2018 +26,IJB-A,ijb_c,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,72a7eb68f0955564e1ceafa75aeeb6b5bbb14e7e,citation,https://pdfs.semanticscholar.org/72a7/eb68f0955564e1ceafa75aeeb6b5bbb14e7e.pdf,Face Recognition with Contrastive Convolution,2018 
+27,IJB-A,ijb_c,42.3889785,-72.5286987,University of Massachusetts,edu,368e99f669ea5fd395b3193cd75b301a76150f9d,citation,https://arxiv.org/pdf/1506.01342.pdf,One-to-many face recognition with bilinear CNNs,2016 +28,IJB-A,ijb_c,32.77824165,34.99565673,Open University of Israel,edu,1e6ed6ca8209340573a5e907a6e2e546a3bf2d28,citation,http://arxiv.org/pdf/1607.01450v1.pdf,Pooling Faces: Template Based Face Recognition with Pooled Face Images,2016 +29,IJB-A,ijb_c,38.88140235,121.52281098,Dalian University of Technology,edu,052f994898c79529955917f3dfc5181586282cf8,citation,https://arxiv.org/pdf/1708.02191.pdf,Unsupervised Domain Adaptation for Face Recognition in Unlabeled Videos,2017 +30,IJB-A,ijb_c,32.9820799,-96.7566278,University of Texas at Dallas,edu,4e8168fbaa615009d1618a9d6552bfad809309e9,citation,http://pdfs.semanticscholar.org/4e81/68fbaa615009d1618a9d6552bfad809309e9.pdf,Deep Convolutional Neural Network Features and the Original Image,2016 +31,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,4e8168fbaa615009d1618a9d6552bfad809309e9,citation,http://pdfs.semanticscholar.org/4e81/68fbaa615009d1618a9d6552bfad809309e9.pdf,Deep Convolutional Neural Network Features and the Original Image,2016 +32,IJB-A,ijb_c,29.7207902,-95.34406271,University of Houston,edu,3cb2841302af1fb9656f144abc79d4f3d0b27380,citation,https://pdfs.semanticscholar.org/3cb2/841302af1fb9656f144abc79d4f3d0b27380.pdf,When 3 D-Aided 2 D Face Recognition Meets Deep Learning : An extended UR 2 D for Pose-Invariant Face Recognition,2017 +33,IJB-A,ijb_c,24.4469025,54.3942563,Khalifa University,edu,0c1d85a197a1f5b7376652a485523e616a406273,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.169,Joint Registration and Representation Learning for Unconstrained Face Identification,2017 +34,IJB-A,ijb_c,-35.23656905,149.08446994,University of Canberra,edu,0c1d85a197a1f5b7376652a485523e616a406273,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.169,Joint Registration and Representation Learning for Unconstrained Face Identification,2017 +35,IJB-A,ijb_c,32.77824165,34.99565673,Open University of Israel,edu,c75e6ce54caf17b2780b4b53f8d29086b391e839,citation,https://arxiv.org/pdf/1802.00542.pdf,"ExpNet: Landmark-Free, Deep, 3D Facial Expressions",2018 +36,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,450c6a57f19f5aa45626bb08d7d5d6acdb863b4b,citation,https://arxiv.org/pdf/1805.00611.pdf,Towards Interpretable Face Recognition,2018 +37,IJB-A,ijb_c,51.7534538,-1.25400997,University of Oxford,edu,30180f66d5b4b7c0367e4b43e2b55367b72d6d2a,citation,http://www.robots.ox.ac.uk/~vgg/publications/2017/Crosswhite17/crosswhite17.pdf,Template Adaptation for Face Verification and Identification,2017 +38,IJB-A,ijb_c,29.7207902,-95.34406271,University of Houston,edu,8334da483f1986aea87b62028672836cb3dc6205,citation,https://arxiv.org/pdf/1805.06306.pdf,Fully Associative Patch-Based 1-to-N Matcher for Face Recognition,2018 +39,IJB-A,ijb_c,-33.8809651,151.20107299,University of Technology Sydney,edu,3b64efa817fd609d525c7244a0e00f98feacc8b4,citation,http://doi.acm.org/10.1145/2845089,A Comprehensive Survey on Pose-Invariant Face Recognition,2016 +40,IJB-A,ijb_c,40.9153196,-73.1270626,Stony Brook University,edu,6fbb179a4ad39790f4558dd32316b9f2818cd106,citation,http://pdfs.semanticscholar.org/6fbb/179a4ad39790f4558dd32316b9f2818cd106.pdf,Input Aggregated Network for Face Video Representation,2016 +41,IJB-A,ijb_c,38.8920756,-104.79716389,"University of Colorado, Colorado 
Springs",edu,d4f1eb008eb80595bcfdac368e23ae9754e1e745,citation,https://arxiv.org/pdf/1708.02337.pdf,Unconstrained Face Detection and Open-Set Face Recognition Challenge,2017 +42,IJB-A,ijb_c,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +43,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,ebb3d5c70bedf2287f9b26ac0031004f8f617b97,citation,https://doi.org/10.1109/MSP.2017.2764116,"Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans",2018 +44,IJB-A,ijb_c,34.0224149,-118.28634407,University of Southern California,edu,d28d32af7ef9889ef9cb877345a90ea85e70f7f1,citation,http://doi.ieeecomputersociety.org/10.1109/FG.2017.84,Local-Global Landmark Confidences for Face Recognition,2017 +45,IJB-A,ijb_c,37.4102193,-122.05965487,Carnegie Mellon University,edu,d28d32af7ef9889ef9cb877345a90ea85e70f7f1,citation,http://doi.ieeecomputersociety.org/10.1109/FG.2017.84,Local-Global Landmark Confidences for Face Recognition,2017 +46,IJB-A,ijb_c,51.5247272,-0.03931035,Queen Mary University of London,edu,a29566375836f37173ccaffa47dea25eb1240187,citation,https://arxiv.org/pdf/1809.09409.pdf,Vehicle Re-Identification in Context,2018 +47,IJB-A,ijb_c,34.0224149,-118.28634407,University of Southern California,edu,29f298dd5f806c99951cb434834bc8dcc765df18,citation,https://doi.org/10.1109/ICPR.2016.7899837,Computationally efficient template-based face recognition,2016 +48,IJB-A,ijb_c,51.49887085,-0.17560797,Imperial College London,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +49,IJB-A,ijb_c,51.59029705,-0.22963221,Middlesex University,edu,54bb25a213944b08298e4e2de54f2ddea890954a,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017 +50,IJB-A,ijb_c,50.8142701,8.771435,Philipps-Universität Marburg,edu,5981c309bd0ffd849c51b1d8a2ccc481a8ec2f5c,citation,https://doi.org/10.1109/ICT.2017.7998256,SmartFace: Efficient face detection on smartphones for wireless on-demand emergency networks,2017 +51,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,a2b4a6c6b32900a066d0257ae6d4526db872afe2,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8272466,Learning Face Image Quality From Human Assessments,2018 +52,IJB-A,ijb_c,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,3dfb822e16328e0f98a47209d7ecd242e4211f82,citation,https://arxiv.org/pdf/1708.08197.pdf,Cross-Age LFW: A Database for Studying Cross-Age Face Recognition in Unconstrained Environments,2017 +53,IJB-A,ijb_c,47.6423318,-122.1369302,Microsoft,company,291265db88023e92bb8c8e6390438e5da148e8f5,citation,http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf,MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition,2016 +54,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,d29eec5e047560627c16803029d2eb8a4e61da75,citation,http://pdfs.semanticscholar.org/d29e/ec5e047560627c16803029d2eb8a4e61da75.pdf,Feature Transfer Learning for Deep Face Recognition with Long-Tail Data,2018 
+55,IJB-A,ijb_c,36.20304395,117.05842113,Tianjin University,edu,5180df9d5eb26283fb737f491623395304d57497,citation,https://arxiv.org/pdf/1804.10899.pdf,Scalable Angular Discriminative Deep Metric Learning for Face Recognition,2018 +56,IJB-A,ijb_c,22.42031295,114.20788644,Chinese University of Hong Kong,edu,abdd17e411a7bfe043f280abd4e560a04ab6e992,citation,https://arxiv.org/pdf/1803.00839.pdf,Pose-Robust Face Recognition via Deep Residual Equivariant Mapping,2018 +57,IJB-A,ijb_c,28.5456282,77.2731505,"IIIT Delhi, India",edu,3cf1f89d73ca4b25399c237ed3e664a55cd273a2,citation,https://arxiv.org/pdf/1710.02914.pdf,Face Sketch Matching via Coupled Deep Transform Learning,2017 +58,IJB-A,ijb_c,-27.49741805,153.01316956,University of Queensland,edu,f27fd2a1bc229c773238f1912db94991b8bf389a,citation,https://doi.org/10.1109/IVCNZ.2016.7804414,How do you develop a face detector for the unconstrained environment?,2016 +59,IJB-A,ijb_c,39.86742125,32.73519072,Hacettepe University,edu,9865fe20df8fe11717d92b5ea63469f59cf1635a,citation,https://arxiv.org/pdf/1805.07566.pdf,Wildest Faces: Face Detection and Recognition in Violent Settings,2018 +60,IJB-A,ijb_c,39.87549675,32.78553506,Middle East Technical University,edu,9865fe20df8fe11717d92b5ea63469f59cf1635a,citation,https://arxiv.org/pdf/1805.07566.pdf,Wildest Faces: Face Detection and Recognition in Violent Settings,2018 +61,IJB-A,ijb_c,28.2290209,112.99483204,"National University of Defense Technology, China",edu,c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d,citation,https://pdfs.semanticscholar.org/aae7/a5182e59f44b7bb49f61999181ce011f800b.pdf,Dual-Agent GANs for Photorealistic and Identity Preserving Profile Face Synthesis,2017 +62,IJB-A,ijb_c,1.2962018,103.77689944,National University of Singapore,edu,c1cc2a2a1ab66f6c9c6fabe28be45d1440a57c3d,citation,https://pdfs.semanticscholar.org/aae7/a5182e59f44b7bb49f61999181ce011f800b.pdf,Dual-Agent GANs for Photorealistic and Identity Preserving Profile Face Synthesis,2017 +63,IJB-A,ijb_c,17.4454957,78.34854698,International Institute of Information Technology,edu,f5eb411217f729ad7ae84bfd4aeb3dedb850206a,citation,https://pdfs.semanticscholar.org/f5eb/411217f729ad7ae84bfd4aeb3dedb850206a.pdf,Tackling Low Resolution for Better Scene Understanding,2018 +64,IJB-A,ijb_c,40.51865195,-74.44099801,State University of New Jersey,edu,96e731e82b817c95d4ce48b9e6b08d2394937cf8,citation,http://arxiv.org/pdf/1508.01722v2.pdf,Unconstrained face verification using deep CNN features,2016 +65,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,96e731e82b817c95d4ce48b9e6b08d2394937cf8,citation,http://arxiv.org/pdf/1508.01722v2.pdf,Unconstrained face verification using deep CNN features,2016 +66,IJB-A,ijb_c,32.77824165,34.99565673,Open University of Israel,edu,870433ba89d8cab1656e57ac78f1c26f4998edfb,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.163,Regressing Robust and Discriminative 3D Morphable Models with a Very Deep Neural Network,2017 +67,IJB-A,ijb_c,55.6801502,12.572327,University of Copenhagen,edu,3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0,citation,http://pdfs.semanticscholar.org/3dfd/94d3fad7e17f52a8ae815eb9cc5471172bc0.pdf,Face2Text: Collecting an Annotated Image Description Corpus for the Generation of Rich Face Descriptions,2018 +68,IJB-A,ijb_c,35.9023226,14.4834189,University of Malta,edu,3dfd94d3fad7e17f52a8ae815eb9cc5471172bc0,citation,http://pdfs.semanticscholar.org/3dfd/94d3fad7e17f52a8ae815eb9cc5471172bc0.pdf,Face2Text: Collecting an Annotated Image Description Corpus for the Generation of Rich 
Face Descriptions,2018 +69,IJB-A,ijb_c,34.0224149,-118.28634407,University of Southern California,edu,6341274aca0c2977c3e1575378f4f2126aa9b050,citation,http://arxiv.org/pdf/1609.03536v1.pdf,A multi-scale cascade fully convolutional network face detector,2016 +70,IJB-A,ijb_c,41.70456775,-86.23822026,University of Notre Dame,edu,17479e015a2dcf15d40190e06419a135b66da4e0,citation,https://arxiv.org/pdf/1610.08119.pdf,Predicting First Impressions With Deep Learning,2017 +71,IJB-A,ijb_c,37.4102193,-122.05965487,Carnegie Mellon University,edu,a0b1990dd2b4cd87e4fd60912cc1552c34792770,citation,https://pdfs.semanticscholar.org/a0b1/990dd2b4cd87e4fd60912cc1552c34792770.pdf,Deep Constrained Local Models for Facial Landmark Detection,2016 +72,IJB-A,ijb_c,30.642769,104.06751175,"Sichuan University, Chengdu",edu,772474b5b0c90629f4d9c223fd9c1ef45e1b1e66,citation,https://doi.org/10.1109/BTAS.2017.8272716,Multi-dim: A multi-dimensional face database towards the application of 3D technology in real-world scenarios,2017 +73,IJB-A,ijb_c,38.8920756,-104.79716389,"University of Colorado, Colorado Springs",edu,4b3f425274b0c2297d136f8833a31866db2f2aec,citation,http://doi.ieeecomputersociety.org/10.1109/CVPRW.2017.85,Toward Open-Set Face Recognition,2017 +74,IJB-A,ijb_c,56.46255985,84.95565495,Tomsk Polytechnic University,edu,17ded725602b4329b1c494bfa41527482bf83a6f,citation,http://pdfs.semanticscholar.org/cb10/434a5d68ffbe9ed0498771192564ecae8894.pdf,Compact Convolutional Neural Network Cascade for Face Detection,2015 +75,IJB-A,ijb_c,37.3351908,-121.88126008,San Jose State University,edu,14b016c7a87d142f4b9a0e6dc470dcfc073af517,citation,http://ws680.nist.gov/publication/get_pdf.cfm?pub_id=918912,Modest proposals for improving biometric recognition papers,2015 +76,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,93420d9212dd15b3ef37f566e4d57e76bb2fab2f,citation,https://arxiv.org/pdf/1611.00851.pdf,An All-In-One Convolutional Neural Network for Face Analysis,2017 +77,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,def2983576001bac7d6461d78451159800938112,citation,https://arxiv.org/pdf/1705.07426.pdf,The Do’s and Don’ts for CNN-Based Face Verification,2017 +78,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,4b605e6a9362485bfe69950432fa1f896e7d19bf,citation,http://biometrics.cse.msu.edu/Publications/Face/BlantonAllenMillerKalkaJain_CVPRWB2016_HID.pdf,A Comparison of Human and Automated Face Verification Accuracy on Unconstrained Image Sets,2016 +79,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,8d3e95c31c93548b8c71dbeee2e9f7180067a888,citation,https://doi.org/10.1109/ICPR.2016.7899841,Template regularized sparse coding for face verification,2016 +80,IJB-A,ijb_c,42.8271556,-73.8780481,GE Global Research,company,8d3e95c31c93548b8c71dbeee2e9f7180067a888,citation,https://doi.org/10.1109/ICPR.2016.7899841,Template regularized sparse coding for face verification,2016 +81,IJB-A,ijb_c,25.0410728,121.6147562,Institute of Information Science,edu,337dd4aaca2c5f9b5d2de8e0e2401b5a8feb9958,citation,https://arxiv.org/pdf/1810.11160.pdf,Data-specific Adaptive Threshold for Face Recognition and Authentication,2018 +82,IJB-A,ijb_c,22.59805605,113.98533784,Shenzhen Institutes of Advanced Technology,edu,0aeb5020003e0c89219031b51bd30ff1bceea363,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.525,Sparsifying Neural Network Connections for Face Recognition,2016 +83,IJB-A,ijb_c,22.42031295,114.20788644,Chinese University of Hong 
Kong,edu,0aeb5020003e0c89219031b51bd30ff1bceea363,citation,http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.525,Sparsifying Neural Network Connections for Face Recognition,2016 +84,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,99daa2839213f904e279aec7cef26c1dfb768c43,citation,https://arxiv.org/pdf/1805.02283.pdf,DocFace: Matching ID Document Photos to Selfies,2018 +85,IJB-A,ijb_c,43.7776426,11.259765,University of Florence,edu,71ca8b6e84c17b3e68f980bfb8cddc837100f8bf,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899774,Effective 3D based frontalization for unconstrained face recognition,2016 +86,IJB-A,ijb_c,51.49887085,-0.17560797,Imperial College London,edu,c43ed9b34cad1a3976bac7979808eb038d88af84,citation,https://arxiv.org/pdf/1804.03675.pdf,Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model,2018 +87,IJB-A,ijb_c,51.24303255,-0.59001382,University of Surrey,edu,c43ed9b34cad1a3976bac7979808eb038d88af84,citation,https://arxiv.org/pdf/1804.03675.pdf,Semi-supervised Adversarial Learning to Generate Photorealistic Face Images of New Identities from 3D Morphable Model,2018 +88,IJB-A,ijb_c,37.3936717,-122.0807262,Facebook,company,628a3f027b7646f398c68a680add48c7969ab1d9,citation,https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf,Plan for Final Year Project : HKU-Face : A Large Scale Dataset for Deep Face Recognition,2017 +89,IJB-A,ijb_c,40.2773077,-7.5095801,University of Beira Interior,edu,61262450d4d814865a4f9a84299c24daa493f66e,citation,http://doi.org/10.1007/s10462-016-9474-x,Biometric recognition in surveillance scenarios: a survey,2016 +90,IJB-A,ijb_c,-31.95040445,115.79790037,University of Western Australia,edu,626913b8fcbbaee8932997d6c4a78fe1ce646127,citation,https://arxiv.org/pdf/1711.05942.pdf,Learning from Millions of 3D Scans for Large-scale 3D Face Recognition,2017 +91,IJB-A,ijb_c,35.9023226,14.4834189,University of Malta,edu,4efd58102ff46b7435c9ec6d4fc3dd21d93b15b4,citation,https://doi.org/10.1109/TIFS.2017.2788002,"Matching Software-Generated Sketches to Face Photographs With a Very Deep CNN, Morphed Faces, and Transfer Learning",2018 +92,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,b6f758be954d34817d4ebaa22b30c63a4b8ddb35,citation,http://arxiv.org/abs/1703.04835,A Proximity-Aware Hierarchical Clustering of Faces,2017 +93,IJB-A,ijb_c,32.77824165,34.99565673,Open University of Israel,edu,0a34fe39e9938ae8c813a81ae6d2d3a325600e5c,citation,https://arxiv.org/pdf/1708.07517.pdf,FacePoseNet: Making a Case for Landmark-Free Face Alignment,2017 +94,IJB-A,ijb_c,40.2773077,-7.5095801,University of Beira Interior,edu,84ae55603bffda40c225fe93029d39f04793e01f,citation,https://doi.org/10.1109/ICB.2016.7550066,ICB-RW 2016: International challenge on biometric recognition in the wild,2016 +95,IJB-A,ijb_c,41.70456775,-86.23822026,University of Notre Dame,edu,73ea06787925157df519a15ee01cc3dc1982a7e0,citation,https://arxiv.org/pdf/1811.01474.pdf,Fast Face Image Synthesis with Minimal Training,2018 +96,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,c6382de52636705be5898017f2f8ed7c70d7ae96,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139089,Unconstrained face detection: State of the art baseline and challenges,2015 +97,IJB-A,ijb_c,38.95187,-77.363259,"Noblis, Falls Church, VA, 
U.S.A.",company,c6382de52636705be5898017f2f8ed7c70d7ae96,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7139089,Unconstrained face detection: State of the art baseline and challenges,2015 +98,IJB-A,ijb_c,40.47913175,-74.43168868,Rutgers University,edu,eee06d68497be8bf3a8aba4fde42a13aa090b301,citation,https://arxiv.org/pdf/1806.11191.pdf,CR-GAN: Learning Complete Representations for Multi-view Generation,2018 +99,IJB-A,ijb_c,35.3103441,-80.73261617,University of North Carolina at Charlotte,edu,eee06d68497be8bf3a8aba4fde42a13aa090b301,citation,https://arxiv.org/pdf/1806.11191.pdf,CR-GAN: Learning Complete Representations for Multi-view Generation,2018 +100,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,a3201e955d6607d383332f3a12a7befa08c5a18c,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900276,VLAD encoded Deep Convolutional features for unconstrained face verification,2016 +101,IJB-A,ijb_c,40.47913175,-74.43168868,Rutgers University,edu,a3201e955d6607d383332f3a12a7befa08c5a18c,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7900276,VLAD encoded Deep Convolutional features for unconstrained face verification,2016 +102,IJB-A,ijb_c,22.42031295,114.20788644,Chinese University of Hong Kong,edu,52d7eb0fbc3522434c13cc247549f74bb9609c5d,citation,https://arxiv.org/pdf/1511.06523.pdf,WIDER FACE: A Face Detection Benchmark,2016 +103,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,19458454308a9f56b7de76bf7d8ff8eaa52b0173,citation,https://pdfs.semanticscholar.org/1945/8454308a9f56b7de76bf7d8ff8eaa52b0173.pdf,Deep Features for Recognizing Disguised Faces in the Wild,0 +104,IJB-A,ijb_c,43.7776426,11.259765,University of Florence,edu,746c0205fdf191a737df7af000eaec9409ede73f,citation,http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8423119,Investigating Nuisances in DCNN-Based Face Recognition,2018 +105,IJB-A,ijb_c,47.5612651,7.5752961,University of Basel,edu,0081e2188c8f34fcea3e23c49fb3e17883b33551,citation,http://pdfs.semanticscholar.org/0081/e2188c8f34fcea3e23c49fb3e17883b33551.pdf,Training Deep Face Recognition Systems with Synthetic Data,2018 +106,IJB-A,ijb_c,37.4102193,-122.05965487,Carnegie Mellon University,edu,2b869d5551b10f13bf6fcdb8d13f0aa4d1f59fc4,citation,https://arxiv.org/pdf/1803.00130.pdf,Ring loss: Convex Feature Normalization for Face Recognition,2018 +107,IJB-A,ijb_c,28.2290209,112.99483204,"National University of Defense Technology, China",edu,5f771fed91c8e4b666489ba2384d0705bcf75030,citation,https://arxiv.org/pdf/1804.03287.pdf,Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing,2018 +108,IJB-A,ijb_c,1.2962018,103.77689944,National University of Singapore,edu,5f771fed91c8e4b666489ba2384d0705bcf75030,citation,https://arxiv.org/pdf/1804.03287.pdf,Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing,2018 +109,IJB-A,ijb_c,42.3889785,-72.5286987,University of Massachusetts,edu,2241eda10b76efd84f3c05bdd836619b4a3df97e,citation,http://arxiv.org/pdf/1506.01342v5.pdf,One-to-many face recognition with bilinear CNNs,2016 +110,IJB-A,ijb_c,22.42031295,114.20788644,Chinese University of Hong Kong,edu,58d76380d194248b3bb291b8c7c5137a0a376897,citation,https://pdfs.semanticscholar.org/58d7/6380d194248b3bb291b8c7c5137a0a376897.pdf,FaceID-GAN : Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis,2018 +111,IJB-A,ijb_c,22.59805605,113.98533784,Shenzhen Institutes 
of Advanced Technology,edu,58d76380d194248b3bb291b8c7c5137a0a376897,citation,https://pdfs.semanticscholar.org/58d7/6380d194248b3bb291b8c7c5137a0a376897.pdf,FaceID-GAN : Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis,2018 +112,IJB-A,ijb_c,42.718568,-84.47791571,Michigan State University,edu,7fb5006b6522436ece5bedf509e79bdb7b79c9a7,citation,https://pdfs.semanticscholar.org/7fb5/006b6522436ece5bedf509e79bdb7b79c9a7.pdf,Multi-Task Convolutional Neural Network for Face Recognition,2017 +113,IJB-A,ijb_c,-27.49741805,153.01316956,University of Queensland,edu,28646c6220848db46c6944967298d89a6559c700,citation,https://pdfs.semanticscholar.org/2864/6c6220848db46c6944967298d89a6559c700.pdf,It takes two to tango : Cascading off-the-shelf face detectors,2018 +114,IJB-A,ijb_c,51.7534538,-1.25400997,University of Oxford,edu,5812d8239d691e99d4108396f8c26ec0619767a6,citation,https://arxiv.org/pdf/1810.09951.pdf,GhostVLAD for set-based face recognition,2018 +115,IJB-A,ijb_c,25.01353105,121.54173736,National Taiwan University of Science and Technology,edu,e4c3587392d477b7594086c6f28a00a826abf004,citation,https://doi.org/10.1109/ICIP.2017.8296998,Face recognition by facial attribute assisted network,2017 +116,IJB-A,ijb_c,1.3484104,103.68297965,Nanyang Technological University,edu,47190d213caef85e8b9dd0d271dbadc29ed0a953,citation,https://arxiv.org/pdf/1807.11649.pdf,The Devil of Face Recognition is in the Noise,2018 +117,IJB-A,ijb_c,32.87935255,-117.23110049,"University of California, San Diego",edu,47190d213caef85e8b9dd0d271dbadc29ed0a953,citation,https://arxiv.org/pdf/1807.11649.pdf,The Devil of Face Recognition is in the Noise,2018 +118,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,ce6d60b69eb95477596535227958109e07c61e1e,citation,http://www.rci.rutgers.edu/~vmp93/Conference_pub/BTAS_2015_FVFF_JunCheng_Chen.pdf,Unconstrained face verification using fisher vectors computed from frontalized faces,2015 +119,IJB-A,ijb_c,29.7207902,-95.34406271,University of Houston,edu,38d8ff137ff753f04689e6b76119a44588e143f3,citation,http://pdfs.semanticscholar.org/38d8/ff137ff753f04689e6b76119a44588e143f3.pdf,When 3D-Aided 2D Face Recognition Meets Deep Learning: An extended UR2D for Pose-Invariant Face Recognition,2017 +120,IJB-A,ijb_c,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,9627f28ea5f4c389350572b15968386d7ce3fe49,citation,https://arxiv.org/pdf/1802.07447.pdf,Load Balanced GANs for Multi-view Face Image Synthesis,2018 +121,IJB-A,ijb_c,34.0224149,-118.28634407,University of Southern California,edu,4e7ed13e541b8ed868480375785005d33530e06d,citation,http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477555,Face recognition using deep multi-pose representations,2016 +122,IJB-A,ijb_c,32.77824165,34.99565673,Open University of Israel,edu,582edc19f2b1ab2ac6883426f147196c8306685a,citation,http://pdfs.semanticscholar.org/be6c/db7b181e73f546d43cf2ab6bc7181d7d619b.pdf,Do We Really Need to Collect Millions of Faces for Effective Face Recognition?,2016 +123,IJB-A,ijb_c,37.4102193,-122.05965487,Carnegie Mellon University,edu,87e6cb090aecfc6f03a3b00650a5c5f475dfebe1,citation,https://pdfs.semanticscholar.org/87e6/cb090aecfc6f03a3b00650a5c5f475dfebe1.pdf,Holistically Constrained Local Model: Going Beyond Frontal Poses for Facial Landmark Detection,2016 +124,IJB-A,ijb_c,34.0224149,-118.28634407,University of Southern 
California,edu,87e6cb090aecfc6f03a3b00650a5c5f475dfebe1,citation,https://pdfs.semanticscholar.org/87e6/cb090aecfc6f03a3b00650a5c5f475dfebe1.pdf,Holistically Constrained Local Model: Going Beyond Frontal Poses for Facial Landmark Detection,2016 +125,IJB-A,ijb_c,39.65404635,-79.96475355,West Virginia University,edu,3b9b200e76a35178da940279d566bbb7dfebb787,citation,http://pdfs.semanticscholar.org/3b9b/200e76a35178da940279d566bbb7dfebb787.pdf,Learning Channel Inter-dependencies at Multiple Scales on Dense Networks for Face Recognition,2017 +126,IJB-A,ijb_c,-27.49741805,153.01316956,University of Queensland,edu,de79437f74e8e3b266afc664decf4e6e4bdf34d7,citation,https://doi.org/10.1109/IVCNZ.2016.7804415,To face or not to face: Towards reducing false positive of face detection,2016 +127,IJB-A,ijb_c,46.0501558,14.46907327,University of Ljubljana,edu,368d59cf1733af511ed8abbcbeb4fb47afd4da1c,citation,http://pdfs.semanticscholar.org/368d/59cf1733af511ed8abbcbeb4fb47afd4da1c.pdf,To Frontalize or Not To Frontalize: A Study of Face Pre-Processing Techniques and Their Impact on Recognition,2016 +128,IJB-A,ijb_c,41.70456775,-86.23822026,University of Notre Dame,edu,368d59cf1733af511ed8abbcbeb4fb47afd4da1c,citation,http://pdfs.semanticscholar.org/368d/59cf1733af511ed8abbcbeb4fb47afd4da1c.pdf,To Frontalize or Not To Frontalize: A Study of Face Pre-Processing Techniques and Their Impact on Recognition,2016 +129,IJB-A,ijb_c,32.77824165,34.99565673,Open University of Israel,edu,62e913431bcef5983955e9ca160b91bb19d9de42,citation,http://pdfs.semanticscholar.org/62e9/13431bcef5983955e9ca160b91bb19d9de42.pdf,Facial Landmark Detection with Tweaked Convolutional Neural Networks,2015 +130,IJB-A,ijb_c,29.5084174,106.57858552,Chongqing University,edu,acd4280453b995cb071c33f7c9db5760432f4279,citation,https://doi.org/10.1007/s00138-018-0907-1,Deep transformation learning for face recognition in the unconstrained scene,2018 +131,IJB-A,ijb_c,38.99203005,-76.9461029,University of Maryland College Park,edu,ceeb67bf53ffab1395c36f1141b516f893bada27,citation,http://pdfs.semanticscholar.org/ceeb/67bf53ffab1395c36f1141b516f893bada27.pdf,Face Alignment by Local Deep Descriptor Regression,2016 +132,IJB-A,ijb_c,40.47913175,-74.43168868,Rutgers University,edu,ceeb67bf53ffab1395c36f1141b516f893bada27,citation,http://pdfs.semanticscholar.org/ceeb/67bf53ffab1395c36f1141b516f893bada27.pdf,Face Alignment by Local Deep Descriptor Regression,2016 +133,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,ceeb67bf53ffab1395c36f1141b516f893bada27,citation,http://pdfs.semanticscholar.org/ceeb/67bf53ffab1395c36f1141b516f893bada27.pdf,Face Alignment by Local Deep Descriptor Regression,2016 +134,IJB-A,ijb_c,39.2899685,-76.62196103,University of Maryland,edu,37619564574856c6184005830deda4310d3ca580,citation,https://doi.org/10.1109/BTAS.2015.7358755,A deep pyramid Deformable Part Model for face detection,2015 +135,IJB-A,ijb_c,51.7534538,-1.25400997,University of Oxford,edu,eb027969f9310e0ae941e2adee2d42cdf07d938c,citation,https://arxiv.org/pdf/1710.08092.pdf,VGGFace2: A Dataset for Recognising Faces across Pose and Age,2018 +136,IJB-A,ijb_c,42.3889785,-72.5286987,University of Massachusetts,edu,3c97c32ff575989ef2869f86d89c63005fc11ba9,citation,http://people.cs.umass.edu/~hzjiang/pubs/face_det_fg_2017.pdf,Face Detection with the Faster R-CNN,2017 +137,IJB-A,ijb_c,39.2899685,-76.62196103,University of 
Maryland,edu,4f7b92bd678772552b3c3edfc9a7c5c4a8c60a8e,citation,https://pdfs.semanticscholar.org/4f7b/92bd678772552b3c3edfc9a7c5c4a8c60a8e.pdf,Deep Density Clustering of Unconstrained Faces,0 +138,IJB-A,ijb_c,1.2962018,103.77689944,National University of Singapore,edu,fca9ebaa30d69ccec8bb577c31693c936c869e72,citation,https://arxiv.org/pdf/1809.00338.pdf,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,2018 +139,IJB-A,ijb_c,40.0044795,116.370238,Chinese Academy of Sciences,edu,fca9ebaa30d69ccec8bb577c31693c936c869e72,citation,https://arxiv.org/pdf/1809.00338.pdf,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,2018 diff --git a/site/public/about/credits/index.html b/site/public/about/credits/index.html index fecc6c7b..6e4f06c1 100644 --- a/site/public/about/credits/index.html +++ b/site/public/about/credits/index.html @@ -28,7 +28,15 @@ <div class="content"> <section><h1>Credits</h1> -<ul> +</section><section><div class='right-sidebar'><ul> +<li><a href="/about/">About</a></li> +<li><a href="/about/press/">Press</a></li> +<li><a href="/about/credits/">Credits</a></li> +<li><a href="/about/disclaimer/">Disclaimer</a></li> +<li><a href="/about/terms/">Terms and Conditions</a></li> +<li><a href="/about/privacy/">Privacy Policy</a></li> +</ul> +</div><ul> <li>MegaPixels by Adam Harvey</li> <li>Made with support from Mozilla</li> <li>Site developed by Jules Laplace</li> diff --git a/site/public/about/disclaimer/index.html b/site/public/about/disclaimer/index.html index a108baa0..b93194fa 100644 --- a/site/public/about/disclaimer/index.html +++ b/site/public/about/disclaimer/index.html @@ -28,7 +28,15 @@ <div class="content"> <section><h1>Disclaimer</h1> -<p>Last updated: December 04, 2018</p> +</section><section><div class='right-sidebar'><ul> +<li><a href="/about/">About</a></li> +<li><a href="/about/press/">Press</a></li> +<li><a href="/about/credits/">Credits</a></li> +<li><a href="/about/disclaimer/">Disclaimer</a></li> +<li><a href="/about/terms/">Terms and Conditions</a></li> +<li><a href="/about/privacy/">Privacy Policy</a></li> +</ul> +</div><p>Last updated: December 04, 2018</p> <p>The information contained on MegaPixels.cc website (the "Service") is for academic and artistic purposes only.</p> <p>MegaPixels.cc assumes no responsibility for errors or omissions in the contents on the Service.</p> <p>In no event shall MegaPixels.cc be liable for any special, direct, indirect, consequential, or incidental damages or any damages whatsoever, whether in an action of contract, negligence or other tort, arising out of or in connection with the use of the Service or the contents of the Service. 
MegaPixels.cc reserves the right to make additions, deletions, or modifications to the contents on the Service at any time without prior notice.</p> diff --git a/site/public/about/index.html b/site/public/about/index.html index fecc6c7b..b7401ee8 100644 --- a/site/public/about/index.html +++ b/site/public/about/index.html @@ -4,7 +4,7 @@ <title>MegaPixels</title> <meta charset="utf-8" /> <meta name="author" content="Adam Harvey" /> - <meta name="description" content="MegaPixels Project Team Credits" /> + <meta name="description" content="About MegaPixels" /> <meta name="referrer" content="no-referrer" /> <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" /> <link rel='stylesheet' href='/assets/css/fonts.css' /> @@ -27,15 +27,20 @@ </header> <div class="content"> - <section><h1>Credits</h1> -<ul> -<li>MegaPixels by Adam Harvey</li> -<li>Made with support from Mozilla</li> -<li>Site developed by Jules Laplace</li> -<li>Design and graphics: Adam Harvey</li> -<li>Research assistants: Berit Gilma</li> + <section><h1>About MegaPixels</h1> +</section><section><div class='right-sidebar'><ul> +<li><a href="/about/press/">Press</a></li> +<li><a href="/about/credits/">Credits</a></li> +<li><a href="/about/disclaimer/">Disclaimer</a></li> +<li><a href="/about/terms/">Terms and Conditions</a></li> +<li><a href="/about/privacy/">Privacy Policy</a></li> </ul> -</section> +<div class='meta'><div><div class='gray'>Years</div><div>2002-2019</div></div><div><div class='gray'>Datasets Analyzed</div><div>325</div></div><div><div class='gray'>Author</div><div>Adam Harvey</div></div><div><div class='gray'>Development</div><div>Jules LaPlace</div></div><div><div class='gray'>Research Assistance</div><div>Berit Gilma</div></div></div></div><p>MegaPixels aims to answer these questions and reveal the stories behind the millions of images used to train, evaluate, and power the facial recognition surveillance algorithms used today. MegaPixels is authored by Adam Harvey, developed in collaboration with Jules LaPlace, and produced in partnership with Mozilla.</p> +</section><section class='images'><div class='sideimage'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/adam-harvey.jpg' alt='Adam Harvey'><div><p><strong>Adam Harvey</strong> is an American artist and researcher based in Berlin. His previous projects (CV Dazzle, Stealth Wear, and SkyLift) explore the potential for countersurveillance as artwork. He is the founder of VFRAME (visual forensics software for human rights groups), the recipient of 2 PrototypeFund awards, and is currently a researcher in residence at Karlsruhe HfG studying artificial intelligence and datasets.</p> +</div></div></section><section class='images'><div class='sideimage'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/jules-laplace.jpg' alt='Jules LaPlace'><div><p><strong>Jules LaPlace</strong> is an American artist and technologist also based in Berlin.
He was previously the CTO of a NYC digital agency and currently works at VFRAME, developing computer vision for human rights groups, and building creative software for artists.</p> +</div></div></section><section class='images'><div class='sideimage'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/mozilla.png' alt='Mozilla'><div><p><strong>Mozilla</strong> is a free software community founded in 1998 by members of Netscape. The Mozilla community uses, develops, spreads and supports Mozilla products, thereby promoting exclusively free software and open standards, with only minor exceptions. The community is supported institutionally by the not-for-profit Mozilla Foundation and its tax-paying subsidiary, the Mozilla Corporation.</p> +</div></div></section> </div> <footer> diff --git a/site/public/about/press/index.html b/site/public/about/press/index.html index b9dd97c2..d36b6bc6 100644 --- a/site/public/about/press/index.html +++ b/site/public/about/press/index.html @@ -28,10 +28,19 @@ <div class="content"> <section><h1>Press</h1> -</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/test.jpg' alt='alt text'><div class='caption'>alt text</div></div></section><section><ul> -<li>Aug 22, 2018: "Transgender YouTubers had their videos grabbed to train facial recognition software" by James Vincent <a href="https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset">https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset</a></li> +</section><section><div class='right-sidebar'><ul> +<li><a href="/about/">About</a></li> +<li><a href="/about/press/">Press</a></li> +<li><a href="/about/credits/">Credits</a></li> +<li><a href="/about/disclaimer/">Disclaimer</a></li> +<li><a href="/about/terms/">Terms and Conditions</a></li> +<li><a href="/about/privacy/">Privacy Policy</a></li> +</ul> +</div></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/about/assets/test.jpg' alt='alt text'><div class='caption'>alt text</div></div></section><section><ul> <li>Aug 22, 2017: "Transgender YouTubers had their videos grabbed to train facial recognition software" by James Vincent <a href="https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset">https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset</a></li> </ul> </section> diff --git a/site/public/about/privacy/index.html b/site/public/about/privacy/index.html index 92a1b9a8..1b3b9d2f 100644 --- a/site/public/about/privacy/index.html +++ b/site/public/about/privacy/index.html @@ -28,10 +28,17 @@ <div class="content"> <section><h1>Privacy Policy</h1> -<p>A summary of our privacy policy
is as follows:</p> +</section><section><div class='right-sidebar'><ul> +<li><a href="/about/">About</a></li> +<li><a href="/about/press/">Press</a></li> +<li><a href="/about/credits/">Credits</a></li> +<li><a href="/about/disclaimer/">Disclaimer</a></li> +<li><a href="/about/terms/">Terms and Conditions</a></li> +<li><a href="/about/privacy/">Privacy Policy</a></li> +</ul> +</div><p>A summary of our privacy policy is as follows:</p> <p>The MegaPixels site does not use any analytics programs or collect any data besides the necessary IP address of your connection, which is deleted every 30 days and used only for security and to prevent misuse.</p> <p>The image processing sections of the site do not collect any data whatsoever. All processing takes place in temporary memory (RAM) and the result is then displayed back to the user over an SSL-secured HTTPS connection. It is the sole responsibility of the user to decide whether to discard their analyzed information by closing the page or to share it, and to accept any potential consequences that may arise from doing so.</p> -<hr> <p>A more complete legal version is below:</p> <p><strong>This is a boilerplate Privacy policy from <a href="https://termsfeed.com/">https://termsfeed.com/</a></strong></p> <p><strong>Needs to be reviewed</strong></p> diff --git a/site/public/about/terms/index.html b/site/public/about/terms/index.html index fd17b4d9..8bd6e738 100644 --- a/site/public/about/terms/index.html +++ b/site/public/about/terms/index.html @@ -27,8 +27,16 @@ </header> <div class="content"> - <section><p>Terms and Conditions ("Terms")</p> -<p>Last updated: December 04, 2018</p> + <section><h1>Terms and Conditions ("Terms")</h1> +</section><section><div class='right-sidebar'><ul> +<li><a href="/about/">About</a></li> +<li><a href="/about/press/">Press</a></li> +<li><a href="/about/credits/">Credits</a></li> +<li><a href="/about/disclaimer/">Disclaimer</a></li> +<li><a href="/about/terms/">Terms and Conditions</a></li> +<li><a href="/about/privacy/">Privacy Policy</a></li> +</ul> +</div><p>Last updated: December 04, 2018</p> <p>Please read these Terms and Conditions ("Terms", "Terms and Conditions") carefully before using the MegaPixels website (the "Service") operated by megapixels.cc ("us", "we", or "our").</p> <p>Your access to and use of the Service is conditioned on your acceptance of and compliance with these Terms.</p> <p>By accessing or using the Service you agree to be bound by these Terms. If you disagree with any part of the terms then you may not access the Service.</p> diff --git a/site/public/datasets/index.html b/site/public/datasets/index.html index 77c5ab2b..7398da17 100644 --- a/site/public/datasets/index.html +++ b/site/public/datasets/index.html @@ -29,27 +29,78 @@ <section><h1>Facial Recognition Datasets</h1> -<p>Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p> -<h3>Summary</h3> -</section><section><div class='meta'><div><div class='gray'>Found</div><div>275 datasets</div></div><div><div class='gray'>Created between</div><div>1993-2018</div></div><div><div class='gray'>Smallest dataset</div><div>20 images</div></div><div><div class='gray'>Largest dataset</div><div>10,000,000 images</div></div></div></section><section><div class='meta'><div><div class='gray'>Highest resolution faces</div><div>450x500 (Unconstrained College Students)</div></div><div><div class='gray'>Lowest resolution faces</div><div>16x20 pixels (QMUL SurvFace)</div></div></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file https://megapixels.nyc3.digitaloceanspaces.com/v1/citations/datasets.csv"}'></div></section> +</section><section><div class='meta'><div><div class='gray'>Found</div><div>275 datasets</div></div><div><div class='gray'>Created between</div><div>1993-2018</div></div><div><div class='gray'>Smallest dataset</div><div>20 images</div></div><div><div class='gray'>Largest dataset</div><div>10,000,000 images</div></div></div></section><section><div class='meta'><div><div class='gray'>Highest resolution faces</div><div>450x500 (Unconstrained College Students)</div></div><div><div class='gray'>Lowest resolution faces</div><div>16x20 pixels (QMUL SurvFace)</div></div></div></section> - <section> - <h2>Dataset Portraits</h2> + <section class='wide dataset-intro'> <p> - We have prepared detailed studies of some of the more noteworthy datasets. + We have prepared detailed case studies of some of the more noteworthy datasets, including tools to help you learn what is contained in these datasets, and even whether your own face has been used to train these algorithms.
</p> <div class="dataset-list"> - <a href="/datasets/lfw/"> + <a href="/datasets/afad/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/afad/assets/index.jpg)"> <div class="dataset"> - Labeled Faces in The Wild + <span>Asian Face Age Dataset</span> </div> </a> - <a href="/datasets/vgg_face2/"> + <a href="/datasets/aflw/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/aflw/assets/index.jpg)"> <div class="dataset"> - VGG Face2 + <span>Annotated Facial Landmarks in The Wild</span> + </div> + </a> + + <a href="/datasets/caltech_10k/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/caltech_10k/assets/index.jpg)"> + <div class="dataset"> + <span>Caltech 10K Faces Dataset</span> + </div> + </a> + + <a href="/datasets/cofw/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/cofw/assets/index.jpg)"> + <div class="dataset"> + <span>Caltech Occluded Faces in The Wild</span> + </div> + </a> + + <a href="/datasets/facebook/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/facebook/assets/index.jpg)"> + <div class="dataset"> + <span>Facebook</span> + </div> + </a> + + <a href="/datasets/feret/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/feret/assets/index.jpg)"> + <div class="dataset"> + <span>FERET: FacE REcognition </span> + </div> + </a> + + <a href="/datasets/lfpw/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfpw/assets/index.jpg)"> + <div class="dataset"> + <span>Labeled Face Parts in The Wild</span> + </div> + </a> + + <a href="/datasets/lfw/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/index.jpg)"> + <div class="dataset"> + <span>Labeled Faces in The Wild</span> + </div> + </a> + + <a href="/datasets/uccs/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/index.jpg)"> + <div class="dataset"> + <span>Unconstrained College Students</span> + </div> + </a> + + <a href="/datasets/vgg_face2/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/vgg_face2/assets/index.jpg)"> + <div class="dataset"> + <span>VGG Face 2 Dataset</span> + </div> + </a> + + <a href="/datasets/youtube_celebrities/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/youtube_celebrities/assets/index.jpg)"> + <div class="dataset"> + <span>YouTube Celebrities</span> </div> </a> diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html index a6226720..5b5e58f3 100644 --- a/site/public/datasets/lfw/index.html +++ b/site/public/datasets/lfw/index.html @@ -4,7 +4,7 @@ <title>MegaPixels</title> <meta charset="utf-8" /> <meta name="author" content="Adam Harvey" /> - <meta name="description" content="LFW: Labeled Faces in The Wild" /> + <meta name="description" content="Labeled Faces in The Wild (LFW) is a database of face photographs designed for studying the problem of unconstrained face recognition." 
/> <meta name="referrer" content="no-referrer" /> <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" /> <link rel='stylesheet' href='/assets/css/fonts.css' /> @@ -27,54 +27,42 @@ </header> <div class="content"> - <section><h1>Labeled Faces in the Wild</h1> -</section><section><div class='meta'><div><div class='gray'>Created</div><div>2007</div></div><div><div class='gray'>Images</div><div>13,233</div></div><div><div class='gray'>People</div><div>5,749</div></div><div><div class='gray'>Created From</div><div>Yahoo News images</div></div><div><div class='gray'>Search available</div><div>Searchable</div></div></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "face_search"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "name_search"}'></div></section><section class='fullwidth'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_feature.jpg' alt='Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.'><div class='caption'>Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.</div></div></section><section><h3>Intro</h3> -<p>Labeled Faces in The Wild (LFW) is among the most widely used facial recognition training datasets in the world and is the first of its kind to be created entirely from images posted online. The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. Use the tools below to check if you were included in this dataset or scroll down to read the analysis.</p> -<p>Three paragraphs describing the LFW dataset in a format that can be easily replicated for the other datasets. Nothing too custom. An analysis of the initial research papers with context relative to all the other dataset papers.</p> -</section><section class='fullwidth'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_montage_everyone_nocrop_1920.jpg' alt=' From George W. Bush to Jamie Lee Curtis: all 5,749 people in the LFW Dataset sorted from most to least images collected.'><div class='caption'> From George W. Bush to Jamie Lee Curtis: all 5,749 people in the LFW Dataset sorted from most to least images collected.</div></div></section><section><h3>LFW by the Numbers</h3> + <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span><span style='color: #ff0000'>Labeled Faces in The Wild</span> (LFW) is a database of face photographs designed for studying the problem of unconstrained face recognition.</span></div><div class='hero_subdesc'><span>It includes 13,233 images of 5,749 people copied from the Internet during 2002-2004. +</span></div></div></section><section><div class='image'><div class='caption'>A few of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.</div></div></section><section><div class='right-sidebar'><h3>Statistics</h3> +<div class='meta'><div><div class='gray'>Years</div><div>2002-2004</div></div><div><div class='gray'>Images</div><div>13,233</div></div><div><div class='gray'>Identities</div><div>5,749</div></div><div><div class='gray'>Origin</div><div>Yahoo News Images</div></div><div><div class='gray'>Funding</div><div>(Possibly, partially CIA)</div></div></div><h3>INSIGHTS</h3> <ul> -<li>Was first published in 2007</li> -<li>Developed out of a prior dataset from Berkely called "Faces in the Wild" or "Names and Faces" [^lfw_original_paper]</li> -<li>Includes 13,233 images and 5,749 different people [^lfw_website]</li> -<li>There are about 3 men for every 1 woman (4,277 men and 1,472 women)[^lfw_website]</li> -<li>The person with the most images is George W. Bush with 530</li> -<li>Most people (70%) in the dataset have only 1 image</li> -<li>Thre are 1,680 people in the dataset with 2 or more images [^lfw_website]</li> -<li>Two out of 4 of the original authors received funding from the Office of Director of National Intelligence and IARPA for their 2016 LFW survey follow up report </li> -<li>The LFW dataset includes over 500 actors, 30 models, 10 presidents, 24 football players, 124 basketball players, 11 kings, and 2 queens</li> -<li>In all the LFW publications provided by the authors the words "ethics", "consent", and "privacy" appear 0 times [^lfw_original_paper], [^lfw_survey], [^lfw_tech_report] , [^lfw_website]</li> +<li>There are about 3 men for every 1 woman (4,277 men and 1,472 women) in the LFW dataset<a class="footnote_shim" name="[^lfw_www]_1"> </a><a href="#[^lfw_www]" class="footnote" title="Footnote 1">1</a></li> +<li>The person with the most images is <a href="http://vis-www.cs.umass.edu/lfw/person/George_W_Bush_comp.html">George W. Bush</a> with 530</li> +<li>There are about 3 George W. Bushes for every 1 <a href="http://vis-www.cs.umass.edu/lfw/person/Tony_Blair.html">Tony Blair</a></li> +<li>The LFW dataset includes over 500 actors, 30 models, 10 presidents, 124 basketball players, 24 football players, 11 kings, 7 queens, and 1 <a href="http://vis-www.cs.umass.edu/lfw/person/Moby.html">Moby</a></li> +<li>In all 3 of the LFW publications [^lfw_original_paper], [^lfw_survey], [^lfw_tech_report] the words "ethics", "consent", and "privacy" appear 0 times</li> <li>The word "future" appears 71 times</li> </ul> -<h3>Facts</h3> +</div><h2>Labeled Faces in the Wild</h2> +<p><em>Labeled Faces in The Wild</em> (LFW) is "a database of face photographs designed for studying the problem of unconstrained face recognition"<a class="footnote_shim" name="[^lfw_www]_2"> </a><a href="#[^lfw_www]" class="footnote" title="Footnote 1">1</a>. It is used to evaluate and improve the performance of facial recognition algorithms in academic, commercial, and government research. According to BiometricUpdate.com<a class="footnote_shim" name="[^lfw_pingan]_1"> </a><a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a>, LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."</p> +<p>The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004.
LFW is a subset of <em>Names and Faces</em> and is part of the first facial recognition training dataset created entirely from images appearing on the Internet. The people appearing in LFW are...</p> +<p>The <em>Names and Faces</em> dataset was the first face recognition dataset created entirely from online photos. However, <em>Names and Faces</em> and <em>LFW</em> are not the first face recognition datasets created entirely "in the wild". That title belongs to the <a href="/datasets/ucd_faces/">UCD dataset</a>. Obtaining images "in the wild" means using an image without explicit consent or awareness from the subject or photographer.</p> +<h3>Biometric Trade Routes</h3> +<p>To understand how this dataset has been used, its citations have been geocoded to show an approximate geographic digital trade route of the biometric data. Lines indicate an organization (educational, commercial, or governmental) that has cited the LFW dataset in their research. Data is compiled from <a href="https://www.semanticscholar.org">Semantic Scholar</a>.</p> +</section><section class='applet_container'><div class='applet' data-payload='{"command": "map"}'></div></section><section><h3>Synthetic Faces</h3> +<p>To visualize the types of photos in the dataset without explicitly publishing individuals' identities, a generative adversarial network (GAN) was trained on the entire dataset. The images in this video show a neural network learning the visual latent space and then interpolating between archetypical identities within the LFW dataset.</p> +</section><section class='fullwidth'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_synthetic.jpg' alt=''></div></section><section><h3>Citations</h3> +<p>Browse or download the geocoded citation data collected for the LFW dataset.</p> +</section><section class='applet_container'><div class='applet' data-payload='{"command": "citations"}'></div></section><section><h3>Additional Information</h3> +<p>(tweet-sized snippets go here)</p> <ul> -<li>Was created for the purpose of improving "unconstrained face recognition" [^lfw_original_paper]</li> -<li>All images in LFW were obtained "in the wild" meaning without any consent from the subject or from the photographer</li> -<li>The faces were detected using the Viola-Jones haarcascade face detector [^lfw_website] [^lfw_survey]</li> -<li>Is considered the "most popular benchmark for face recognition" [^lfw_baidu]</li> -<li>Is "the most widely used evaluation set in the field of facial recognition" [^lfw_pingan]</li> -<li><p>Is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."
[^lfw_pingan]</p> -</li> -<li><p>All images were copied from Yahoo News between 2002 - 2004 [^lfw_original_paper]</p> -</li> -<li>SenseTime, who has relied on LFW for benchmarking their facial recognition performance, is the leading provider of surveillance to the Chinese Government</li> +<li>The LFW dataset is considered the "most popular benchmark for face recognition" <a class="footnote_shim" name="[^lfw_baidu]_1"> </a><a href="#[^lfw_baidu]" class="footnote" title="Footnote 2">2</a></li> +<li>The LFW dataset is "the most widely used evaluation set in the field of facial recognition" <a class="footnote_shim" name="[^lfw_pingan]_2"> </a><a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a></li> +<li>All images in the LFW dataset were obtained "in the wild", meaning without any consent from the subject or from the photographer</li> +<li>The faces in the LFW dataset were detected using the Viola-Jones haarcascade face detector [^lfw_website] [^lfw_survey]</li> +<li>The LFW dataset is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." <a class="footnote_shim" name="[^lfw_pingan]_3"> </a><a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a></li> +<li>All images in the LFW dataset were copied from Yahoo News between 2002 - 2004</li> +<li>In 2014, two of the four original authors of the LFW dataset received funding from IARPA and ODNI for their followup paper <a href="https://www.semanticscholar.org/paper/Labeled-Faces-in-the-Wild-%3A-Updates-and-New-Huang-Learned-Miller/2d3482dcff69c7417c7b933f22de606a0e8e42d4">Labeled Faces in the Wild: Updates and New Reporting Procedures</a> via IARPA contract number 2014-14071600010</li> +<li>The dataset includes 2 images of <a href="http://vis-www.cs.umass.edu/lfw/person/George_Tenet.html">George Tenet</a>, the former Director of Central Intelligence (DCI) for the Central Intelligence Agency whose facial biometrics were eventually used to help train facial recognition software in China and Russia</li> </ul> </section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_montage_top1_640.jpg' alt=' former President George W. Bush'><div class='caption'> former President George W. Bush</div></div> -<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_montage_top2_4_640.jpg' alt=' Colin Powell (236), Tony Blair (144), and Donald Rumsfeld (121)'><div class='caption'> Colin Powell (236), Tony Blair (144), and Donald Rumsfeld (121)</div></div></section><section><h3>People and Companies using the LFW Dataset</h3> -<p>This section describes who is using the dataset and for what purposes. It should include specific examples of people or companies with citations and screenshots. This section is followed up by the graph, the map, and then the supplementary material.</p> -<p>The LFW dataset is used by numerous companies for <a href="about/glossary#benchmarking">benchmarking</a> algorithms and in some cases <a href="about/glossary#training">training</a>.
According to the benchmarking results page [^lfw_results] provided by the authors, over 2 dozen companies have contributed their benchmark results.</p> -<p>According to BiometricUpdate.com [^lfw_pingan], LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."</p> -<p>According to researchers at the Baidu Research – Institute of Deep Learning "LFW has been the most popular evaluation benchmark for face recognition, and played a very important role in facilitating the face recognition society to improve algorithm. [^lfw_baidu]."</p> -<p>In addition to commercial use as an evaluation tool, alll of the faces in LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.</p> -</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_screenshot_01.jpg' alt=' "PING AN Tech facial recognition receives high score in latest LFW test results"'><div class='caption'> "PING AN Tech facial recognition receives high score in latest LFW test results"</div></div> -<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_screenshot_02.jpg' alt=' "Face Recognition Performance in LFW benchmark"'><div class='caption'> "Face Recognition Performance in LFW benchmark"</div></div> -<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_screenshot_03.jpg' alt=' "The 1st place in face verification challenge, LFW"'><div class='caption'> "The 1st place in face verification challenge, LFW"</div></div></section><section><p>In benchmarking, companies use a dataset to evaluate their algorithms which are typically trained on other data. After training, researchers will use LFW as a benchmark to compare results with other algorithms.</p> -<p>For example, Baidu (est. net worth $13B) uses LFW to report results for their "Targeting Ultimate Accuracy: Face Recognition via Deep Embedding". According to the three Baidu researchers who produced the paper:</p> -<h3>Citations</h3> -<p>Overall, LFW has at least 116 citations from 11 countries.</p> -</section><section class='applet_container'><div class='applet' data-payload='{"command": "map"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "citations"}'></div></section><section><h3>Conclusion</h3> -<p>The LFW face recognition training and evaluation dataset is a historically important face dataset as it was the first popular dataset to be created entirely from Internet images, paving the way for a global trend towards downloading anyone’s face from the Internet and adding it to a dataset. As will be evident with other datasets, LFW’s approach has now become the norm.</p> -<p>For all the 5,000 people in this datasets, their face is forever a part of facial recognition history. It would be impossible to remove anyone from the dataset because it is so ubiquitous. 
For their rest of the lives and forever after, these 5,000 people will continue to be used for training facial recognition surveillance.</p> -<h2>Code</h2> +<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_montage_top2_4_640.jpg' alt=' Colin Powell (236), Tony Blair (144), and Donald Rumsfeld (121)'><div class='caption'> Colin Powell (236), Tony Blair (144), and Donald Rumsfeld (121)</div></div></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_montage_all_crop.jpg' alt='All 5,379 faces in the Labeled Faces in The Wild Dataset'><div class='caption'>All 5,379 faces in the Labeled Faces in The Wild Dataset</div></div></section><section><h2>Code</h2> +<p>The LFW dataset is so widely used that a popular code library called scikit-learn includes a function called <code>fetch_lfw_people</code> to download the faces in the LFW dataset.</p> </section><section><pre><code class="lang-python">#!/usr/bin/python import numpy as np from sklearn.datasets import fetch_lfw_people import imageio import imutils @@ -87,31 +75,29 @@ lfw_people = fetch_lfw_people(min_faces_per_person=1, resize=1, color=True, funn # introspect dataset n_samples, h, w, c = lfw_people.images.shape -print('{:,} images at {}x{}'.format(n_samples, w, h)) +print(f'{n_samples:,} images at {w}x{h} pixels') cols, rows = (176, 76) n_ims = cols * rows # build montages im_scale = 0.5 -ims = lfw_people.images[:n_ims -montages = imutils.build_montages(ims, (int(w*im_scale, int(h*im_scale)), (cols, rows)) +ims = lfw_people.images[:n_ims] +montages = imutils.build_montages(ims, (int(w * im_scale), int(h * im_scale)), (cols, rows)) montage = montages[0] # save full montage image imageio.imwrite('lfw_montage_full.png', montage) # make a smaller version -montage_960 = imutils.resize(montage, width=960) -imageio.imwrite('lfw_montage_960.jpg', montage_960) +montage = imutils.resize(montage, width=960) +imageio.imwrite('lfw_montage_960.jpg', montage) </code></pre> -</section><section><h2>Disclaimer</h2> -<p>MegaPixels is an educational art project designed to encourage discourse about facial recognition datasets. Any ethical or legal issues should be directed to the researcher's parent organizations. Except where necessary for contact or clarity, the names of researchers have been subsituted by their parent organization. In no way does this project aim to villify researchers who produced the datasets.</p> -<p>Read more about <a href="about/code-of-conduct">MegaPixels Code of Conduct</a></p> -<div class="footnotes"> -<hr> -<ol></ol> -</div> -</section> +</section><section><h3>Supplementary Material</h3> +</section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file assets/lfw_commercial_use.csv", "fields": ["name_display, company_url, example_url, country, description"]}'></div></section><section><p>Text and graphics ©Adam Harvey / megapixels.cc</p> +</section><section><ul class="footnotes"><li><a name="[^lfw_www]" class="footnote_shim"></a><span class="backlinks"><a href="#[^lfw_www]_1">a</a><a href="#[^lfw_www]_2">b</a></span><p><a href="http://vis-www.cs.umass.edu/lfw/results.html">http://vis-www.cs.umass.edu/lfw/results.html</a></p> +</li><li><a name="[^lfw_baidu]" class="footnote_shim"></a><span class="backlinks"><a href="#[^lfw_baidu]_1">a</a></span><p>Jingtuo Liu, Yafeng Deng, Tao Bai, Zhengping Wei, Chang Huang. Targeting Ultimate Accuracy: Face Recognition via Deep Embedding.
<a href="https://arxiv.org/abs/1506.07310">https://arxiv.org/abs/1506.07310</a></p> +</li><li><a name="[^lfw_pingan]" class="footnote_shim"></a><span class="backlinks"><a href="#[^lfw_pingan]_1">a</a><a href="#[^lfw_pingan]_2">b</a><a href="#[^lfw_pingan]_3">c</a></span><p>Lee, Justin. "PING AN Tech facial recognition receives high score in latest LFW test results". BiometricUpdate.com. Feb 13, 2017. <a href="https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results">https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results</a></p> +</li></ul></section> </div> <footer> diff --git a/site/public/datasets/vgg_face2/index.html b/site/public/datasets/vgg_face2/index.html index b7ba5a4c..efe6cb84 100644 --- a/site/public/datasets/vgg_face2/index.html +++ b/site/public/datasets/vgg_face2/index.html @@ -4,7 +4,7 @@ <title>MegaPixels</title> <meta charset="utf-8" /> <meta name="author" content="Adam Harvey" /> - <meta name="description" content="A large scale image dataset for face recognition" /> + <meta name="description" content="VGG Face 2 Dataset" /> <meta name="referrer" content="no-referrer" /> <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" /> <link rel='stylesheet' href='/assets/css/fonts.css' /> @@ -27,35 +27,10 @@ </header> <div class="content"> - <section><h1>VGG Faces2</h1> -</section><section><div class='meta'><div><div class='gray'>Created</div><div>2018</div></div><div><div class='gray'>Images</div><div>3.3M</div></div><div><div class='gray'>People</div><div>9,000</div></div><div><div class='gray'>Created From</div><div>Scraping search engines</div></div><div><div class='gray'>Search available</div><div>[Searchable](#)</div></div></div></section><section><p>VGG Face2 is the updated version of the VGG Face dataset and now includes over 3.3M face images from over 9K people. The identities were selected by taking the top 500K identities in Google's Knowledge Graph of celebrities and then selecting only the names that yielded enough training images. 
The dataset was created in the UK but funded by Office of Director of National Intelligence in the United States.</p> -</section><section class='applet_container'><div class='applet' data-payload='{"command": "face_search"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "name_search"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file assets/lfw_names_gender_kg_min.csv", "fields": ["Name, Images, Gender, Description"]}'></div></section><section><h3>VGG Face2 by the Numbers</h3> + <section><h1>VGG Face 2</h1> +</section><section><div class='meta'><div><div class='gray'>Years</div><div>TBD</div></div><div><div class='gray'>Images</div><div>TBD</div></div><div><div class='gray'>Identities</div><div>TBD</div></div><div><div class='gray'>Origin</div><div>TBD</div></div><div><div class='gray'>Funding</div><div>IARPA</div></div></div></section><section class='fullwidth'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/vgg_face2/assets/vgg_face2_index.gif' alt='...'><div class='caption'>...</div></div></section><section><h3>Analysis</h3> <ul> -<li>1,331 actresses, 139 presidents</li> -<li>3 husbands and 16 wives</li> -<li>2 snooker player</li> -<li>1 guru</li> -<li>1 pornographic actress</li> -<li>3 computer programmer</li> -</ul> -<h3>Names and descriptions</h3> -<ul> -<li>The original VGGF2 name list has been updated with the results returned from Google Knowledge</li> -<li>Names with a similarity score greater than 0.75 where automatically updated. Scores computed using <code>import difflib; seq = difflib.SequenceMatcher(a=a.lower(), b=b.lower()); score = seq.ratio()</code></li> -<li>The 97 names with a score of 0.75 or lower were manually reviewed and includes name changes validating using Wikipedia.org results for names such as "Bruce Jenner" to "Caitlyn Jenner", spousal last-name changes, and discretionary changes to improve search results such as combining nicknames with full name when appropriate, for example changing "Aleksandar Petrović" to "Aleksandar 'Aco' Petrović" and minor changes such as "Mohammad Ali" to "Muhammad Ali"</li> -<li>The 'Description' text was automatically added when the Knowledge Graph score was greater than 250</li> -</ul> -<h2>TODO</h2> -<ul> -<li>create name list, and populate with Knowledge graph information like LFW</li> -<li>make list of interesting number stats, by the numbers</li> -<li>make list of interesting important facts</li> -<li>write intro abstract</li> -<li>write analysis of usage</li> -<li>find examples, citations, and screenshots of useage</li> -<li>find list of companies using it for table</li> -<li>create montages of the dataset, like LFW</li> -<li>create right to removal information</li> +<li>The VGG Face 2 dataset includes approximately 1,331 actresses, 139 presidents, 16 wives, 3 husbands, 2 snooker players, and 1 guru</li> </ul> </section> diff --git a/site/public/datasets_v0/index.html b/site/public/datasets_v0/index.html new file mode 100644 index 00000000..c2e6617b --- /dev/null +++ b/site/public/datasets_v0/index.html @@ -0,0 +1,53 @@ +<!doctype html> +<html> +<head> + <title>MegaPixels</title> + <meta charset="utf-8" /> + <meta name="author" content="Adam Harvey" /> + <meta name="description" content="Facial Recognition Datasets" /> + <meta name="referrer" content="no-referrer" /> + <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" /> + <link
rel='stylesheet' href='/assets/css/fonts.css' /> + <link rel='stylesheet' href='/assets/css/tabulator.css' /> + <link rel='stylesheet' href='/assets/css/css.css' /> + <link rel='stylesheet' href='/assets/css/leaflet.css' /> + <link rel='stylesheet' href='/assets/css/applets.css' /> +</head> +<body> + <header> + <a class='slogan' href="/"> + <div class='logo'></div> + <div class='site_name'>MegaPixels</div> + </a> + <div class='links'> + <a href="/datasets/">Datasets</a> + <a href="/research/">Research</a> + <a href="/about/">About</a> + </div> + </header> + <div class="content"> + + <section><h1>Facial Recognition Datasets</h1> +<p>Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p> +<h3>Summary</h3> +</section><section><div class='meta'><div><div class='gray'>Found</div><div>275 datasets</div></div><div><div class='gray'>Created between</div><div>1993-2018</div></div><div><div class='gray'>Smallest dataset</div><div>20 images</div></div><div><div class='gray'>Largest dataset</div><div>10,000,000 images</div></div></div></section><section><div class='meta'><div><div class='gray'>Highest resolution faces</div><div>450x500 (Unconstrained College Students)</div></div><div><div class='gray'>Lowest resolution faces</div><div>16x20 pixels (QMUL SurvFace)</div></div></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file https://megapixels.nyc3.digitaloceanspaces.com/v1/citations/datasets.csv"}'></div></section> + + </div> + <footer> + <div> + <a href="/">MegaPixels.cc</a> + <a href="/about/disclaimer/">Disclaimer</a> + <a href="/about/terms/">Terms of Use</a> + <a href="/about/privacy/">Privacy</a> + <a href="/about/">About</a> + <a href="/about/team/">Team</a> + </div> + <div> + MegaPixels ©2017-19 Adam R. Harvey / + <a href="https://ahprojects.com">ahprojects.com</a> + </div> + </footer> +</body> + +<script src="/assets/js/dist/index.js"></script> +</html>
\ No newline at end of file diff --git a/site/public/datasets_v0/lfw/index.html b/site/public/datasets_v0/lfw/index.html new file mode 100644 index 00000000..4ee4799f --- /dev/null +++ b/site/public/datasets_v0/lfw/index.html @@ -0,0 +1,131 @@ +<!doctype html> +<html> +<head> + <title>MegaPixels</title> + <meta charset="utf-8" /> + <meta name="author" content="Adam Harvey" /> + <meta name="description" content="LFW: Labeled Faces in The Wild" /> + <meta name="referrer" content="no-referrer" /> + <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" /> + <link rel='stylesheet' href='/assets/css/fonts.css' /> + <link rel='stylesheet' href='/assets/css/tabulator.css' /> + <link rel='stylesheet' href='/assets/css/css.css' /> + <link rel='stylesheet' href='/assets/css/leaflet.css' /> + <link rel='stylesheet' href='/assets/css/applets.css' /> +</head> +<body> + <header> + <a class='slogan' href="/"> + <div class='logo'></div> + <div class='site_name'>MegaPixels</div> + </a> + <div class='links'> + <a href="/datasets/">Datasets</a> + <a href="/research/">Research</a> + <a href="/about/">About</a> + </div> + </header> + <div class="content"> + + <section><h1>Labeled Faces in the Wild</h1> +</section><section><div class='meta'><div><div class='gray'>Created</div><div>2007</div></div><div><div class='gray'>Images</div><div>13,233</div></div><div><div class='gray'>People</div><div>5,749</div></div><div><div class='gray'>Created From</div><div>Yahoo News images</div></div><div><div class='gray'>Search available</div><div>Searchable</div></div></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "face_search"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "name_search"}'></div></section><section class='fullwidth'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/datasets_v0/lfw/assets/lfw_feature.jpg' alt='Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.'><div class='caption'>Eighteen of the 5,749 people in the Labeled Faces in the Wild Dataset. The most widely used face dataset for benchmarking commercial face recognition algorithms.</div></div></section><section><h3>Intro</h3> +<p>Labeled Faces in The Wild (LFW) is among the most widely used facial recognition training datasets in the world and is the first of its kind to be created entirely from images posted online. The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. Use the tools below to check if you were included in this dataset or scroll down to read the analysis.</p> +<p>Three paragraphs describing the LFW dataset in a format that can be easily replicated for the other datasets. Nothing too custom. An analysis of the initial research papers with context relative to all the other dataset papers.</p> +</section><section class='fullwidth'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/datasets_v0/lfw/assets/lfw_montage_everyone_nocrop_1920.jpg' alt=' From George W. Bush to Jamie Lee Curtis: all 5,749 people in the LFW Dataset sorted from most to least images collected.'><div class='caption'> From George W.
Bush to Jamie Lee Curtis: all 5,749 people in the LFW Dataset sorted from most to least images collected.</div></div></section><section><h3>LFW by the Numbers</h3> +<ul> +<li>Was first published in 2007</li> +<li>Developed out of a prior dataset from Berkeley called "Faces in the Wild" or "Names and Faces" [^lfw_original_paper]</li> +<li>Includes 13,233 images and 5,749 different people [^lfw_website]</li> +<li>There are about 3 men for every 1 woman (4,277 men and 1,472 women)[^lfw_website]</li> +<li>The person with the most images is George W. Bush with 530</li> +<li>Most people (70%) in the dataset have only 1 image</li> +<li>There are 1,680 people in the dataset with 2 or more images [^lfw_website]</li> +<li>Two out of 4 of the original authors received funding from the Office of Director of National Intelligence and IARPA for their 2016 LFW survey follow up report </li> +<li>The LFW dataset includes over 500 actors, 30 models, 10 presidents, 24 football players, 124 basketball players, 11 kings, and 2 queens</li> +<li>In all the LFW publications provided by the authors the words "ethics", "consent", and "privacy" appear 0 times [^lfw_original_paper], [^lfw_survey], [^lfw_tech_report] , [^lfw_website]</li> +<li>The word "future" appears 71 times</li> +</ul> +<h3>Facts</h3> +<ul> +<li>Was created for the purpose of improving "unconstrained face recognition" [^lfw_original_paper]</li> +<li>All images in LFW were obtained "in the wild" meaning without any consent from the subject or from the photographer</li> +<li>The faces were detected using the Viola-Jones haarcascade face detector [^lfw_website] [^lfw_survey]</li> +<li>Is considered the "most popular benchmark for face recognition" [^lfw_baidu]</li> +<li>Is "the most widely used evaluation set in the field of facial recognition" [^lfw_pingan]</li> +<li><p>Is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." [^lfw_pingan]</p> +</li> +<li><p>All images were copied from Yahoo News between 2002 - 2004 [^lfw_original_paper]</p> +</li> +<li>SenseTime, who has relied on LFW for benchmarking their facial recognition performance, is the leading provider of surveillance to the Chinese Government</li> +</ul> +</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/datasets_v0/lfw/assets/lfw_montage_top1_640.jpg' alt=' former President George W. Bush'><div class='caption'> former President George W. Bush</div></div> +<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/datasets_v0/lfw/assets/lfw_montage_top2_4_640.jpg' alt=' Colin Powell (236), Tony Blair (144), and Donald Rumsfeld (121)'><div class='caption'> Colin Powell (236), Tony Blair (144), and Donald Rumsfeld (121)</div></div></section><section><h3>People and Companies using the LFW Dataset</h3> +<p>This section describes who is using the dataset and for what purposes. It should include specific examples of people or companies with citations and screenshots. This section is followed up by the graph, the map, and then the supplementary material.</p> +<p>The LFW dataset is used by numerous companies for <a href="about/glossary#benchmarking">benchmarking</a> algorithms and in some cases <a href="about/glossary#training">training</a>.
According to the benchmarking results page [^lfw_results] provided by the authors, over 2 dozen companies have contributed their benchmark results.</p> +<p>According to BiometricUpdate.com [^lfw_pingan], LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."</p> +<p>According to researchers at the Baidu Research – Institute of Deep Learning "LFW has been the most popular evaluation benchmark for face recognition, and played a very important role in facilitating the face recognition society to improve algorithm. [^lfw_baidu]."</p> +<p>In addition to commercial use as an evaluation tool, all of the faces in the LFW dataset are prepackaged into a popular machine learning code framework called scikit-learn.</p> +</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/datasets_v0/lfw/assets/lfw_screenshot_01.jpg' alt=' "PING AN Tech facial recognition receives high score in latest LFW test results"'><div class='caption'> "PING AN Tech facial recognition receives high score in latest LFW test results"</div></div> +<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/datasets_v0/lfw/assets/lfw_screenshot_02.jpg' alt=' "Face Recognition Performance in LFW benchmark"'><div class='caption'> "Face Recognition Performance in LFW benchmark"</div></div> +<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/datasets_v0/lfw/assets/lfw_screenshot_03.jpg' alt=' "The 1st place in face verification challenge, LFW"'><div class='caption'> "The 1st place in face verification challenge, LFW"</div></div></section><section><p>In benchmarking, companies use a dataset to evaluate their algorithms which are typically trained on other data. After training, researchers will use LFW as a benchmark to compare results with other algorithms.</p> +<p>For example, Baidu (est. net worth $13B) uses LFW to report results for their "Targeting Ultimate Accuracy: Face Recognition via Deep Embedding". According to the three Baidu researchers who produced the paper:</p> +<h3>Citations</h3> +<p>Overall, LFW has at least 116 citations from 11 countries.</p> +</section><section class='applet_container'><div class='applet' data-payload='{"command": "map"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "citations"}'></div></section><section><h3>Conclusion</h3> +<p>The LFW face recognition training and evaluation dataset is a historically important face dataset as it was the first popular dataset to be created entirely from Internet images, paving the way for a global trend towards downloading anyone’s face from the Internet and adding it to a dataset. As will be evident with other datasets, LFW’s approach has now become the norm.</p> +<p>For all the 5,000 people in this dataset, their face is forever a part of facial recognition history. It would be impossible to remove anyone from the dataset because it is so ubiquitous.
For the rest of their lives and forever after, the faces of these 5,749 people will continue to be used for training facial recognition surveillance systems.</p>
+<h2>Code</h2>
+</section><section><pre><code class="lang-python">#!/usr/bin/python
+
+import numpy as np
+from sklearn.datasets import fetch_lfw_people
+import imageio
+import imutils
+
+# download the LFW dataset (the first run takes a while)
+lfw_people = fetch_lfw_people(min_faces_per_person=1, resize=1, color=True, funneled=False)
+
+# introspect the dataset
+n_samples, h, w, c = lfw_people.images.shape
+print(f'{n_samples:,} images at {w}x{h} pixels')
+cols, rows = (176, 76)  # grid large enough to hold all 13,233 faces
+n_ims = cols * rows
+
+# build montages
+im_scale = 0.5
+# scikit-learn loads LFW as floats scaled to [0, 1]; rescale to uint8 for imutils/OpenCV
+ims = [np.uint8(im * 255) for im in lfw_people.images[:n_ims]]
+montages = imutils.build_montages(ims, (int(w * im_scale), int(h * im_scale)), (cols, rows))
+montage = montages[0]
+
+# save the full montage image
+imageio.imwrite('lfw_montage_full.png', montage)
+
+# make a smaller version
+montage_960 = imutils.resize(montage, width=960)
+imageio.imwrite('lfw_montage_960.jpg', montage_960)
+</code></pre>
+</section><section><div class="footnotes">
+<hr>
+<ol></ol>
+</div>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels ©2017-19 Adam R. Harvey /
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/dist/index.js"></script>
+</html>
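The Facts list above notes that LFW's faces were located with the Viola-Jones Haar-cascade detector. Below is a minimal sketch of that detection step using OpenCV's bundled frontal-face cascade; the input filename and the `scaleFactor`/`minNeighbors` values are illustrative assumptions, not the settings the LFW authors used.

```
# Sketch: Viola-Jones Haar-cascade face detection with OpenCV (opencv-python)
import cv2

# frontal-face cascade file that ships with opencv-python
cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
detector = cv2.CascadeClassifier(cascade_path)

img = cv2.imread('photo.jpg')  # placeholder input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# scaleFactor/minNeighbors are common defaults, not the values used to build LFW
faces = detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)

# draw a box around each detected face and save the result
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite('faces_detected.jpg', img)
```

Because the default cascade tends to find only near-frontal, well-lit faces, a dataset assembled with it, like LFW, inherits that frontal-pose bias.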
\ No newline at end of file
diff --git a/site/public/datasets_v0/lfw/right-to-removal/index.html b/site/public/datasets_v0/lfw/right-to-removal/index.html
new file mode 100644
index 00000000..97ce4d05
--- /dev/null
+++ b/site/public/datasets_v0/lfw/right-to-removal/index.html
@@ -0,0 +1,61 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="LFW: Labeled Faces in The Wild" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/tabulator.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+ <link rel='stylesheet' href='/assets/css/leaflet.css' />
+ <link rel='stylesheet' href='/assets/css/applets.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ </a>
+ <div class='links'>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section><h1>Labeled Faces in the Wild</h1>
+<h2>Right to Removal</h2>
+<p>If you are affected by the disclosure of your identity in this dataset, please contact the authors. Many dataset authors have stated that they are willing to remove images upon request. The authors of the LFW dataset provide the following email address for inquiries:</p>
+<p>You can use the following message to request removal from the dataset:</p>
+<p>To: Gary Huang <a href="mailto:gbhuang@cs.umass.edu">gbhuang@cs.umass.edu</a></p>
+<p>Subject: Request for Removal from LFW Face Dataset</p>
+<p>Dear [researcher name],</p>
+<p>I am writing to you about the "Labeled Faces in The Wild Dataset". Recently I discovered that your dataset includes my identity, and I no longer wish to be included in it.</p>
+<p>The dataset is being used by thousands of companies around the world to improve facial recognition software, including usage by governments for the purposes of law enforcement, national security, tracking consumers in retail environments, and tracking individuals through public spaces.</p>
+<p>My name as it appears in your dataset is [your name]. Please remove all images of me from your dataset and inform your newsletter subscribers to likewise update their copies.</p>
+<p>- [your name]</p>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels ©2017-19 Adam R. Harvey /
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/dist/index.js"></script>
+</html>
\ No newline at end of file diff --git a/site/public/datasets_v0/lfw/tables/index.html b/site/public/datasets_v0/lfw/tables/index.html new file mode 100644 index 00000000..dd460843 --- /dev/null +++ b/site/public/datasets_v0/lfw/tables/index.html @@ -0,0 +1,52 @@ +<!doctype html> +<html> +<head> + <title>MegaPixels</title> + <meta charset="utf-8" /> + <meta name="author" content="Adam Harvey" /> + <meta name="description" content="LFW: Labeled Faces in The Wild" /> + <meta name="referrer" content="no-referrer" /> + <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" /> + <link rel='stylesheet' href='/assets/css/fonts.css' /> + <link rel='stylesheet' href='/assets/css/tabulator.css' /> + <link rel='stylesheet' href='/assets/css/css.css' /> + <link rel='stylesheet' href='/assets/css/leaflet.css' /> + <link rel='stylesheet' href='/assets/css/applets.css' /> +</head> +<body> + <header> + <a class='slogan' href="/"> + <div class='logo'></div> + <div class='site_name'>MegaPixels</div> + </a> + <div class='links'> + <a href="/datasets/">Datasets</a> + <a href="/research/">Research</a> + <a href="/about/">About</a> + </div> + </header> + <div class="content"> + + <section><h1>Labeled Faces in the Wild</h1> +<h2>Tables</h2> +</section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file assets/lfw_names_gender_kg_min.csv", "fields": ["Name, Images, Gender, Description"]}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file assets/lfw_commercial_use.csv", "fields": ["name_display, company_url, example_url, country, description"]}'></div></section><section></section> + + </div> + <footer> + <div> + <a href="/">MegaPixels.cc</a> + <a href="/about/disclaimer/">Disclaimer</a> + <a href="/about/terms/">Terms of Use</a> + <a href="/about/privacy/">Privacy</a> + <a href="/about/">About</a> + <a href="/about/team/">Team</a> + </div> + <div> + MegaPixels ©2017-19 Adam R. Harvey / + <a href="https://ahprojects.com">ahprojects.com</a> + </div> + </footer> +</body> + +<script src="/assets/js/dist/index.js"></script> +</html>
\ No newline at end of file
diff --git a/site/public/datasets_v0/vgg_face2/index.html b/site/public/datasets_v0/vgg_face2/index.html
new file mode 100644
index 00000000..6a67e7e4
--- /dev/null
+++ b/site/public/datasets_v0/vgg_face2/index.html
@@ -0,0 +1,80 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="A large scale image dataset for face recognition" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/tabulator.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+ <link rel='stylesheet' href='/assets/css/leaflet.css' />
+ <link rel='stylesheet' href='/assets/css/applets.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ </a>
+ <div class='links'>
+ <a href="/datasets/">Datasets</a>
+ <a href="/research/">Research</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content">
+
+ <section><h1>VGG Face2</h1>
+</section><section><div class='meta'><div><div class='gray'>Created</div><div>2018</div></div><div><div class='gray'>Images</div><div>3.3M</div></div><div><div class='gray'>People</div><div>9,000</div></div><div><div class='gray'>Created From</div><div>Scraping search engines</div></div><div><div class='gray'>Search available</div><div><a href="#">Searchable</a></div></div></div></section><section><p>VGG Face2 is the updated version of the VGG Face dataset and now includes over 3.3M face images from over 9K people. The identities were selected by taking the top 500K identities in Google's Knowledge Graph of celebrities and then selecting only the names that yielded enough training images. The dataset was created in the UK but funded by the Office of the Director of National Intelligence in the United States.</p>
+</section><section class='applet_container'><div class='applet' data-payload='{"command": "face_search"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "name_search"}'></div></section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file assets/lfw_names_gender_kg_min.csv", "fields": ["Name, Images, Gender, Description"]}'></div></section><section><h3>VGG Face2 by the Numbers</h3>
+<ul>
+<li>1,331 actresses, 139 presidents</li>
+<li>3 husbands and 16 wives</li>
+<li>2 snooker players</li>
+<li>1 guru</li>
+<li>1 pornographic actress</li>
+<li>3 computer programmers</li>
+</ul>
+<h3>Names and descriptions</h3>
+<ul>
+<li>The original VGGF2 name list has been updated with the results returned from the Google Knowledge Graph</li>
+<li>Names with a similarity score greater than 0.75 were automatically updated. 
Scores were computed using <code>import difflib; seq = difflib.SequenceMatcher(a=a.lower(), b=b.lower()); score = seq.ratio()</code> (expanded in the sketch below)</li>
+<li>The 97 names with a score of 0.75 or lower were manually reviewed; this review included name changes validated using Wikipedia.org results, such as "Bruce Jenner" to "Caitlyn Jenner", spousal last-name changes, discretionary changes to improve search results such as combining nicknames with full names when appropriate (for example, changing "Aleksandar Petrović" to "Aleksandar 'Aco' Petrović"), and minor changes such as "Mohammad Ali" to "Muhammad Ali"</li>
+<li>The 'Description' text was automatically added when the Knowledge Graph score was greater than 250</li>
+</ul>
+<h2>TODO</h2>
+<ul>
+<li>create name list and populate with Knowledge Graph information, like LFW</li>
+<li>make list of interesting number stats, by the numbers</li>
+<li>make list of interesting important facts</li>
+<li>write intro abstract</li>
+<li>write analysis of usage</li>
+<li>find examples, citations, and screenshots of usage</li>
+<li>find list of companies using it for table</li>
+<li>create montages of the dataset, like LFW</li>
+<li>create right-to-removal information</li>
+</ul>
+</section>
+
+ </div>
+ <footer>
+ <div>
+ <a href="/">MegaPixels.cc</a>
+ <a href="/about/disclaimer/">Disclaimer</a>
+ <a href="/about/terms/">Terms of Use</a>
+ <a href="/about/privacy/">Privacy</a>
+ <a href="/about/">About</a>
+ <a href="/about/team/">Team</a>
+ </div>
+ <div>
+ MegaPixels ©2017-19 Adam R. Harvey /
+ <a href="https://ahprojects.com">ahprojects.com</a>
+ </div>
+ </footer>
+</body>
+
+<script src="/assets/js/dist/index.js"></script>
+</html>
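The name-matching step described in the notes above expands into a short, self-contained sketch. The `difflib` call and the 0.75 threshold come directly from the notes; the helper function name and the printed example pairs are illustrative.

```
# Sketch of the name-similarity scoring described above (difflib + 0.75 threshold)
import difflib

def name_similarity(a, b):
    # case-insensitive similarity ratio in [0, 1], as in the notes above
    return difflib.SequenceMatcher(a=a.lower(), b=b.lower()).ratio()

AUTO_UPDATE_THRESHOLD = 0.75  # per the notes: above this, names were auto-updated

# illustrative pairs from the notes: high scores auto-update, low scores get manual review
for old, new in [('Mohammad Ali', 'Muhammad Ali'), ('Bruce Jenner', 'Caitlyn Jenner')]:
    score = name_similarity(old, new)
    action = 'auto-update' if score > AUTO_UPDATE_THRESHOLD else 'manual review'
    print(f'{old!r} -> {new!r}: {score:.2f} ({action})')
```

Running this prints roughly 0.92 for the "Mohammad Ali" pair (auto-update) and 0.62 for the "Bruce Jenner" pair, which falls into the 97 manually reviewed names.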
\ No newline at end of file diff --git a/site/public/index.html b/site/public/index.html index d2986084..d5a2e59f 100644 --- a/site/public/index.html +++ b/site/public/index.html @@ -3,15 +3,13 @@ <head> <title>MegaPixels</title> <meta charset="utf-8" /> - <meta name="author" content="Adam Harvey" /> - <meta name="description" content="" /> + <meta name="author" content="info@megapixels.cc" /> + <meta name="description" content="The Dark Side of Datasets" /> <meta name="referrer" content="no-referrer" /> <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" /> <link rel='stylesheet' href='/assets/css/fonts.css' /> - <link rel='stylesheet' href='/assets/css/tabulator.css' /> <link rel='stylesheet' href='/assets/css/css.css' /> - <link rel='stylesheet' href='/assets/css/leaflet.css' /> - <link rel='stylesheet' href='/assets/css/applets.css' /> + <link rel='stylesheet' href='/assets/css/splash.css' /> </head> <body> <header> @@ -20,112 +18,22 @@ <div class='site_name'>MegaPixels</div> </a> <div class='links'> - <a href="/datasets/">Datasets</a> - <a href="/research/">Research</a> - <a href="/about/">About</a> + <a href="/datasets/" class='aboutLink'>DATASETS</a> + <a href="/research/" class='aboutLink'>RESEARCH</a> + <a href="/about/" class='aboutLink'>ABOUT</a> </div> </header> - <div class="content"> - - <div class='hero'> - <div class='inner'> - <div id="face_container"> - <div class='currentFace'></div> - </div> - <div class='intro'> - <div class='headline'> - MegaPixels is an art project that explores the dark side of face recognition datasets and the future of computer vision. - </div> - - <div class='buttons'> - <a href="/datasets/lfw/"><button class='important'>Find Your Face</button></a> - <a href="/analyze/"><button class='normal'>Analyze Your Face</button></a> - </div> - - <div class='under'> - Made by Adam Harvey in collaboration with Jules Laplace, and in partnership with Mozilla.<br/> - <a href='/about/'>Read more about MegaPixels</a> - </div> - </div> - </div> - </div> - - <section class='wide dataset-intro'> - <h2>Face Recognition Datasets</h2> - <div class='right-sidebar'> - <h4>SUMMARY</h4> - <div class='meta'> - <div><div class='gray'>Found</div><div>275 datasets</div></div> - <div><div class='gray'>Created between</div><div>1993-2018</div></div> - <div><div class='gray'>Smallest dataset</div><div>20 images</div></div> - <div><div class='gray'>Largest dataset</div><div>10,000,000 images</div></div> - <div><div class='gray'>Highest resolution faces</div><div>450x500 (Unconstrained College Students)</div></div> - <div><div class='gray'>Lowest resolution faces</div><div>16x20 pixels (QMUL SurvFace)</div></div> - </div> - </div> - - <p> - MegaPixels is an online art project that explores the history of face recognition from the perspective of datasets. MegaPixels aims to unravel the meanings behind the data and expose the darker corners of the biometric industry that have contributed to its growth. - </p> - <p> - Through a mix of case studies, visualizations, and interactive tools, Megapixels will use face recognition datasets to tell the history of modern biometrics. Many people have contributed to the development of face recignition technology, both wittingly and unwittingly. Not only scientists, but also celebrities and regular internet users have played a part. - </p> - <p> - Face recognition is a mess of contradictinos. It works, yet it doesn't actually work. It's cheap and accessible, but also expensive and out of control. 
Face recognition research has achieved headline grabbing superhuman accuracies over 99.9%, yet in practice it's also dangerously inaccurate. - </p> - <p> - During a trial installation at Sudkreuz station in Berlin in 2018, 20% of the matches were wrong, a number so low that it should not have any connection to law enforcement or justice. And in London, the Metropolitan police had been using face recognition software that mistakenly identified an alarming 98% of people as criminals, which perhaps is a crime itself. - </p> - </section> - - <section class='wide dataset-intro'> - <h2>Dataset Portraits</h2> - <p> - We have prepared detailed case studies of some of the more noteworthy datasets, including tools to help you learn what is contained in these datasets, and even whether your own face has been used to train these algorithms. - </p> - - <div class="dataset-list"> - - <a href="/datasets/lfw/"> - <div class="dataset"> - Labeled Faces in The Wild - </div> - </a> - - <a href="/datasets/vgg_face2/"> - <div class="dataset"> - VGG Face2 - </div> - </a> - - </div> - </section> - - + <div class="splash"> + <div id="three_container"></div> </div> <footer> <div> - <a href="/">MegaPixels.cc</a> - <a href="/about/disclaimer/">Disclaimer</a> - <a href="/about/terms/">Terms of Use</a> - <a href="/about/privacy/">Privacy</a> - <a href="/about/">About</a> - <a href="/about/team/">Team</a> </div> <div> MegaPixels ©2017-19 Adam R. Harvey / - <a href="https://ahprojects.com">ahprojects.com</a> + <a href="https://ahprojects.com/megapixels/">ahprojects.com</a> </div> </footer> </body> - -<script src="https://cdnjs.cloudflare.com/ajax/libs/babel-polyfill/7.0.0/polyfill.min.js"></script> -<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/97/three.min.js"></script> -<script src="https://unpkg.com/three.texttexture@18.10.24"></script> -<script src="/assets/demo/cloud/THREE.TextSprite.js"></script> -<script src="/assets/js/vendor/three.meshline.js"></script> -<script src="/assets/js/vendor/oktween.js"></script> -<script src="/assets/js/app/face.js"></script> - -<script src="/assets/js/dist/index.js"></script> +<script src="/assets/js/dist/splash.js"></script> </html>
\ No newline at end of file
diff --git a/site/public/info/index.html b/site/public/info/index.html
index d3a7d549..0b59e647 100644
--- a/site/public/info/index.html
+++ b/site/public/info/index.html
@@ -27,7 +27,7 @@
 </header>
 <div class="content">

- <section><h2>What do facial recognition algorithms see?</h2>
+ <section><h2>Face Analysis</h2>
 </section><section class='applet_container'><div class='applet' data-payload='{"command": "face_analysis"}'></div></section><section><p>Results are only stored for the duration of the analysis and are deleted when you leave this page.</p>
 </section>
diff --git a/site/public/research/00_introduction/index.html b/site/public/research/00_introduction/index.html
index b6cc8e4a..395bd268 100644
--- a/site/public/research/00_introduction/index.html
+++ b/site/public/research/00_introduction/index.html
@@ -42,18 +42,18 @@
 </div>
 </section>

- <section><div class='meta'><div><div class='gray'>Posted</div><div>Dec. 15</div></div><div><div class='gray'>Author</div><div>Adam Harvey</div></div></div></section><section><p>It was the early 2000s. Face recognition was new and no one seemed sure exactly how well it was going to perform in practice. In theory, face recognition was poised to be a game changer, a force multiplier, a strategic military advantage, a way to make cities safer and to secure borders. This was the future John Ashcroft demanded with the Total Information Awareness act of the 2003 and that spooks had dreamed of for decades. It was a future that academics at Carnegie Mellon Universtiy and Colorado State University would help build. It was also a future that celebrities would play a significant role in building. And to the surprise of ordinary Internet users like myself and perhaps you, it was a future that millions of Internet users would unwittingly play role in creating.</p>
+ <section><div class='meta'><div><div class='gray'>Posted</div><div>Dec. 15</div></div><div><div class='gray'>Author</div><div>Adam Harvey</div></div></div></section><section><p>Ever since the first computational facial recognition research project by the CIA in the early 1960s, data has always played a vital role in the development of our biometric future. Without facial recognition datasets there would be no facial recognition. Datasets are an indispensable part of any artificial intelligence system because, as Geoffrey Hinton points out:</p>
+<blockquote><p>Our relationship to computers has changed. Instead of programming them, we now show them and they figure it out. - <a href="https://www.youtube.com/watch?v=-eyhCTvrEtE">Geoffrey Hinton</a></p>
+</blockquote>
+<p>Algorithms learn from datasets. And we program algorithms by building datasets. But datasets aren't like code. There's no programming language made of data except for the data itself.</p>
+<p>Ignore content below these lines</p>
+<p>It was the early 2000s. Face recognition was new and no one seemed sure exactly how well it was going to perform in practice. In theory, face recognition was poised to be a game changer, a force multiplier, a strategic military advantage, a way to make cities safer and to secure borders. This was the future John Ashcroft demanded with the Total Information Awareness Act of 2003 and that spooks had dreamed of for decades. It was a future that academics at Carnegie Mellon University and Colorado State University would help build. It was also a future that celebrities would play a significant role in building. 
And to the surprise of ordinary Internet users like myself and perhaps you, it was a future that millions of Internet users would unwittingly play a role in creating.</p>
<p>Now the future has arrived and it doesn't make sense. Facial recognition works yet it doesn't actually work. Facial recognition is cheap and accessible but also expensive and out of control. Facial recognition research has achieved headline-grabbing superhuman accuracies over 99.9% yet facial recognition is also dangerously inaccurate. During a trial installation at Sudkreuz station in Berlin in 2018, 20% of the matches were wrong, an error rate so high that it should not have any connection to law enforcement or justice. And in London, the Metropolitan police had been using facial recognition software that mistakenly identified an alarming 98% of people as criminals <sup class="footnote-ref" id="fnref-met_police"><a href="#fn-met_police">1</a></sup>, which perhaps is a crime itself.</p>
<p>MegaPixels is an online art project that explores the history of facial recognition from the perspective of datasets. To paraphrase the artist Trevor Paglen, whoever controls the dataset controls the meaning. MegaPixels aims to unravel the meanings behind the data and expose the darker corners of the biometric industry that have contributed to its growth. MegaPixels does not start with a conclusion, a moralistic slant, or a</p>
<p>Whether or not to build facial recognition is a question that can no longer be asked. As an outspoken critic of face recognition, I've developed, and hopefully furthered, my understanding during the last 10 years I've spent working with computer vision. Though I initially disagreed, I've come to see the technocratic perspective as a non-negotiable reality. As Oren (nytimes article) wrote in a NYT Op-Ed, "the horse is out of the barn" and the only thing we can do, collectively or individually, is to steer towards the least worst outcome. Computational communication has entered a new era and it's both exciting and frightening to explore the potentials and opportunities. In 1997, getting access to 1 teraFLOPS of computational power would have cost you $55 million and required a strategic partnership with the Department of Defense. At the time of writing, anyone can rent 1 teraFLOPS on a cloud GPU marketplace for less than $1/day. <sup class="footnote-ref" id="fnref-asci_option_red"><a href="#fn-asci_option_red">2</a></sup>.</p>
<p>I hope that this project will illuminate the darker areas of the strange world of facial recognition that have not yet received attention, and encourage discourse in academic and industry contexts. By no means do I believe discourse can save the day. Nor do I think creating artwork can. In fact, I'm not exactly sure what the outcome of this project will be. The project is not so much what I publish here but what happens after. This entire project is only a prologue.</p>
<p>As McLuhan wrote, "You can't have a static, fixed position in the electric age". And in our hyper-connected age of mass surveillance, artificial intelligence, and unevenly distributed virtual futures, the most irrational thing to be is rational. Increasingly the world is becoming a contradiction where people use surveillance to protest surveillance, use</p>
<p>Like many projects, MegaPixels spent years meandering between formats and unfeasible budgets, and was generally too niche of a subject. 
The basic idea for this project, as proposed to the original <a href="https://tacticaltech.org/projects/the-glass-room-nyc/">Glass Room</a> installation in 2016 in NYC, was to build an interactive mirror that showed people whether they had been included in the <a href="/datasets/lfw">LFW</a> facial recognition dataset. The idea was based on my reaction to all the datasets I'd come across during research for the CV Dazzle project. I'd noticed strange datasets created for training and testing face detection algorithms. Most were created in laboratory settings and their interpretation of face data was very strict.</p>
-<p>About the name</p>
-<p>About the funding</p>
-<p>About me</p>
-<p>About the team</p>
-<p>Conclusion</p>
<h3>for other post</h3>
<p>It was the early 2000s. Face recognition was new and no one seemed sure how well it was going to perform in practice. In theory, face recognition was poised to be a game changer, a force multiplier, a strategic military advantage, a way to make cities safer and to secure the borders. It was the future that John Ashcroft demanded with the Total Information Awareness Act of 2003. It was a future that academics helped build. It was a future that celebrities helped build. And it was a future that</p>
<p>A decade earlier, the Department of Homeland Security and the Counterdrug Technology Development Program Office initiated a feasibility study called FERET (FacE REcognition Technology) to "develop automatic face recognition capabilities that could be employed to assist security, intelligence, and law enforcement personnel in the performance of their duties [^feret_website]."</p>
diff --git a/site/public/research/01_from_1_to_100_pixels/index.html b/site/public/research/01_from_1_to_100_pixels/index.html
index 4446e1be..c11e966e 100644
--- a/site/public/research/01_from_1_to_100_pixels/index.html
+++ b/site/public/research/01_from_1_to_100_pixels/index.html
@@ -68,6 +68,9 @@
 <li>NIST report on sres states several resolutions</li>
 <li>"Results show that the tested face recognition systems yielded similar performance for query sets with eye-to-eye distance from 60 pixels to 30 pixels" <sup class="footnote-ref" id="fnref-nist_sres"><a href="#fn-nist_sres">1</a></sup></li>
 </ul>
+<ul>
+<li>"Note that we only keep the images with a minimal side length of 80 pixels." and "a face will be labeled as “Ignore” if it is very difficult to be detected due to blurring, severe deformation and unrecognizable eyes, or the side length of its bounding box is less than 32 pixels." Ge_Detecting_Masked_Faces_CVPR_2017_paper.pdf</li>
+</ul>
 <div class="footnotes">
 <hr>
 <ol><li id="fn-nist_sres"><p>NIST 906932. Performance Assessment of Face Recognition Using Super-Resolution. Shuowen Hu, Robert Maschal, S. Susan Young, Tsai Hong Hong, Jonathon P. Phillips<a href="#fnref-nist_sres" class="footnote">↩</a></p></li>
diff --git a/site/templates/datasets.html b/site/templates/datasets.html
index ba230eee..3456eac8 100644
--- a/site/templates/datasets.html
+++ b/site/templates/datasets.html
@@ -4,17 +4,16 @@
 {{ content }}

- <section>
- <h2>Dataset Portraits</h2>
+ <section class='wide dataset-intro'>
 <p>
- We have prepared detailed studies of some of the more noteworthy datasets.
+ We have prepared detailed case studies of some of the more noteworthy datasets, including tools to help you learn what is contained in these datasets, and even whether your own face has been used to train these algorithms. 
</p> <div class="dataset-list"> {% for dataset in datasets %} - <a href="{{ dataset.url }}"> + <a href="{{ dataset.url }}" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1{{ dataset.url }}assets/index.jpg)"> <div class="dataset"> - {{ dataset.title }} + <span>{{ dataset.title }}</span> </div> </a> {% endfor %} diff --git a/site/templates/home.html b/site/templates/home.html index 9756e21f..d5a2e59f 100644 --- a/site/templates/home.html +++ b/site/templates/home.html @@ -1,82 +1,39 @@ -{% extends 'layout.html' %} - -{% block content %} - <div class='hero'> - <div class='inner'> - <div id="face_container"> - <div class='currentFace'></div> - </div> - <div class='intro'> - <div class='headline'> - MegaPixels is an art project that explores the dark side of face recognition datasets and the future of computer vision. - </div> - - <div class='buttons'> - <a href="/datasets/lfw/"><button class='important'>Find Your Face</button></a> - <a href="/analyze/"><button class='normal'>Analyze Your Face</button></a> - </div> - - <div class='under'> - Made by Adam Harvey in collaboration with Jules Laplace, and in partnership with Mozilla.<br/> - <a href='/about/'>Read more about MegaPixels</a> - </div> - </div> +<!doctype html> +<html> +<head> + <title>MegaPixels</title> + <meta charset="utf-8" /> + <meta name="author" content="info@megapixels.cc" /> + <meta name="description" content="The Dark Side of Datasets" /> + <meta name="referrer" content="no-referrer" /> + <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" /> + <link rel='stylesheet' href='/assets/css/fonts.css' /> + <link rel='stylesheet' href='/assets/css/css.css' /> + <link rel='stylesheet' href='/assets/css/splash.css' /> +</head> +<body> + <header> + <a class='slogan' href="/"> + <div class='logo'></div> + <div class='site_name'>MegaPixels</div> + </a> + <div class='links'> + <a href="/datasets/" class='aboutLink'>DATASETS</a> + <a href="/research/" class='aboutLink'>RESEARCH</a> + <a href="/about/" class='aboutLink'>ABOUT</a> </div> + </header> + <div class="splash"> + <div id="three_container"></div> </div> - - <section class='wide dataset-intro'> - <h2>Face Recognition Datasets</h2> - <div class='right-sidebar'> - <h4>SUMMARY</h4> - <div class='meta'> - <div><div class='gray'>Found</div><div>275 datasets</div></div> - <div><div class='gray'>Created between</div><div>1993-2018</div></div> - <div><div class='gray'>Smallest dataset</div><div>20 images</div></div> - <div><div class='gray'>Largest dataset</div><div>10,000,000 images</div></div> - <div><div class='gray'>Highest resolution faces</div><div>450x500 (Unconstrained College Students)</div></div> - <div><div class='gray'>Lowest resolution faces</div><div>16x20 pixels (QMUL SurvFace)</div></div> - </div> + <footer> + <div> </div> - - <p> - MegaPixels is an online art project that explores the history of face recognition from the perspective of datasets. MegaPixels aims to unravel the meanings behind the data and expose the darker corners of the biometric industry that have contributed to its growth. - </p> - <p> - Through a mix of case studies, visualizations, and interactive tools, Megapixels will use face recognition datasets to tell the history of modern biometrics. Many people have contributed to the development of face recignition technology, both wittingly and unwittingly. Not only scientists, but also celebrities and regular internet users have played a part. 
- </p> - <p> - Face recognition is a mess of contradictinos. It works, yet it doesn't actually work. It's cheap and accessible, but also expensive and out of control. Face recognition research has achieved headline grabbing superhuman accuracies over 99.9%, yet in practice it's also dangerously inaccurate. - </p> - <p> - During a trial installation at Sudkreuz station in Berlin in 2018, 20% of the matches were wrong, a number so low that it should not have any connection to law enforcement or justice. And in London, the Metropolitan police had been using face recognition software that mistakenly identified an alarming 98% of people as criminals, which perhaps is a crime itself. - </p> - </section> - - <section class='wide dataset-intro'> - <h2>Dataset Portraits</h2> - <p> - We have prepared detailed case studies of some of the more noteworthy datasets, including tools to help you learn what is contained in these datasets, and even whether your own face has been used to train these algorithms. - </p> - - <div class="dataset-list"> - {% for dataset in datasets %} - <a href="{{ dataset.url }}"> - <div class="dataset"> - {{ dataset.title }} - </div> - </a> - {% endfor %} + <div> + MegaPixels ©2017-19 Adam R. Harvey / + <a href="https://ahprojects.com/megapixels/">ahprojects.com</a> </div> - </section> - -{% endblock %} - -{% block scripts %} -<script src="https://cdnjs.cloudflare.com/ajax/libs/babel-polyfill/7.0.0/polyfill.min.js"></script> -<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/97/three.min.js"></script> -<script src="https://unpkg.com/three.texttexture@18.10.24"></script> -<script src="/assets/demo/cloud/THREE.TextSprite.js"></script> -<script src="/assets/js/vendor/three.meshline.js"></script> -<script src="/assets/js/vendor/oktween.js"></script> -<script src="/assets/js/app/face.js"></script> -{% endblock %} + </footer> +</body> +<script src="/assets/js/dist/splash.js"></script> +</html>
\ No newline at end of file
