author     jules@lens <julescarbon@gmail.com>  2019-04-18 16:55:14 +0200
committer  jules@lens <julescarbon@gmail.com>  2019-04-18 16:55:14 +0200
commit     2e4daed06264f3dc3bbabd8fa4fc0d8ceed4c5af (patch)
tree       1a17bb4459776ac91f7006a2a407ca12edd3471e /site/public
parent     3d32e5b4ddbfbfe5d4abeda57ff200adf1532f4c (diff)
parent     f8012f88641b0bb378ba79393f277c8918ebe452 (diff)
Merge branch 'master' of asdf.us:megapixels_dev
Diffstat (limited to 'site/public')
-rw-r--r--  site/public/about/assets/LICENSE/index.html  |  58
-rw-r--r--  site/public/about/attribution/index.html  |  78
-rw-r--r--  site/public/about/faq/index.html  |  59
-rw-r--r--  site/public/about/index.html  |  75
-rw-r--r--  site/public/about/legal/index.html  |  71
-rw-r--r--  site/public/about/press/index.html  |  31
-rw-r--r--  site/public/about/privacy/index.html  |  67
-rw-r--r--  site/public/about/terms/index.html  |  79
-rw-r--r--  site/public/datasets/50_people_one_question/index.html  |  85
-rw-r--r--  site/public/datasets/afad/index.html  |  96
-rw-r--r--  site/public/datasets/aflw/index.html  |  54
-rw-r--r--  site/public/datasets/brainwash/index.html  |  104
-rw-r--r--  site/public/datasets/caltech_10k/index.html  |  104
-rw-r--r--  site/public/datasets/celeba/index.html  |  83
-rw-r--r--  site/public/datasets/cofw/index.html  |  113
-rw-r--r--  site/public/datasets/duke_mtmc/index.html  |  451
-rw-r--r--  site/public/datasets/facebook/index.html  |  55
-rw-r--r--  site/public/datasets/feret/index.html  |  102
-rw-r--r--  site/public/datasets/hrt_transgender/index.html  |  101
-rw-r--r--  site/public/datasets/index.html  |  83
-rw-r--r--  site/public/datasets/lfpw/index.html  |  87
-rw-r--r--  site/public/datasets/lfw/index.html  |  144
-rw-r--r--  site/public/datasets/market_1501/index.html  |  85
-rw-r--r--  site/public/datasets/msceleb/index.html  |  89
-rw-r--r--  site/public/datasets/oxford_town_centre/index.html  |  156
-rw-r--r--  site/public/datasets/pipa/index.html  |  86
-rw-r--r--  site/public/datasets/pubfig/index.html  |  117
-rw-r--r--  site/public/datasets/uccs/index.html  |  405
-rw-r--r--  site/public/datasets/vgg_face2/index.html  |  105
-rw-r--r--  site/public/datasets/viper/index.html  |  68
-rw-r--r--  site/public/datasets/youtube_celebrities/index.html  |  91
-rw-r--r--  site/public/index.html  |  2
-rw-r--r--  site/public/info/index.html  |  23
-rw-r--r--  site/public/research/00_introduction/index.html  |  30
-rw-r--r--  site/public/research/01_from_1_to_100_pixels/index.html  |  23
-rw-r--r--  site/public/research/02_what_computers_can_see/index.html  |  23
-rw-r--r--  site/public/research/index.html  |  23
-rw-r--r--  site/public/test/chart/index.html  |  23
-rw-r--r--  site/public/test/citations/index.html  |  23
-rw-r--r--  site/public/test/csv/index.html  |  23
-rw-r--r--  site/public/test/datasets/index.html  |  23
-rw-r--r--  site/public/test/face_search/index.html  |  23
-rw-r--r--  site/public/test/gallery/index.html  |  41
-rw-r--r--  site/public/test/index.html  |  23
-rw-r--r--  site/public/test/map/index.html  |  23
-rw-r--r--  site/public/test/name_search/index.html  |  23
-rw-r--r--  site/public/test/pie_chart/index.html  |  23
47 files changed, 2151 insertions, 1603 deletions
diff --git a/site/public/about/assets/LICENSE/index.html b/site/public/about/assets/LICENSE/index.html
new file mode 100644
index 00000000..66d8b3ac
--- /dev/null
+++ b/site/public/about/assets/LICENSE/index.html
@@ -0,0 +1,58 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+ <link rel='stylesheet' href='/assets/css/leaflet.css' />
+ <link rel='stylesheet' href='/assets/css/applets.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+
+ </a>
+ <div class='links'>
+ <a href="/datasets/">Datasets</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content content-">
+
+ <section><p>and include this license and attribution protocol within any derivative work.</p>
+<p>If you publish data derived from MegaPixels, the original dataset creators should first be notified.</p>
+<p>The MegaPixels dataset is made available under the Open Data Commons Attribution License (<a href="https://opendatacommons.org/licenses/by/1.0/">https://opendatacommons.org/licenses/by/1.0/</a>) and for academic use only.</p>
+<p>READABLE SUMMARY OF Open Data Commons Attribution License</p>
+<p>You are free:</p>
+<p>To Share: To copy, distribute and use the dataset
+ To Create: To produce works from the dataset
+ To Adapt: To modify, transform and build upon the database</p>
+<p>As long as you:</p>
+<p>Attribute: You must attribute any public use of the database, or works produced from the database, in the manner specified in the license. For any use or redistribution of the database, or works produced from it, you must make clear to others the license of the database and keep intact any notices on the original database.</p>
+</section>
+
+ </div>
+ <footer>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
+ </footer>
+</body>
+
+<script src="/assets/js/dist/index.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/about/attribution/index.html b/site/public/about/attribution/index.html
new file mode 100644
index 00000000..5fe92b8d
--- /dev/null
+++ b/site/public/about/attribution/index.html
@@ -0,0 +1,78 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="MegaPixels Privacy Policy" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+ <link rel='stylesheet' href='/assets/css/leaflet.css' />
+ <link rel='stylesheet' href='/assets/css/applets.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+
+ </a>
+ <div class='links'>
+ <a href="/datasets/">Datasets</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content content-about">
+
+ <section><h1>Legal</h1>
+<section class="about-menu">
+<ul>
+<li><a href="/about/">About</a></li>
+<li><a href="/about/press/">Press</a></li>
+<li><a class="current" href="/about/attribution/">Attribution</a></li>
+<li><a href="/about/legal/">Legal / Privacy</a></li>
+</ul>
+</section><p>ATTRIBUTION PROTOCOL</p>
+<p>If you use the MegaPixels data or any data derived from it, please cite the original work as follows:</p>
+<pre>
+@online{megapixels,
+ author = {Harvey, Adam and LaPlace, Jules},
+ title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
+ year = 2019,
+ url = {https://megapixels.cc/},
+ urldate = {2019-04-20}
+}
+</pre><p>and include this license and attribution protocol within any derivative work.</p>
+<p>If you publish data derived from MegaPixels, the original dataset creators should first be notified.</p>
+<p>The MegaPixels dataset is made available under the Open Data Commons Attribution License (<a href="https://opendatacommons.org/licenses/by/1.0/">https://opendatacommons.org/licenses/by/1.0/</a>) and for academic use only.</p>
+<p>READABLE SUMMARY OF Open Data Commons Attribution License</p>
+<p>You are free:</p>
+<blockquote><p>To Share: To copy, distribute and use the dataset
+To Create: To produce works from the dataset
+To Adapt: To modify, transform and build upon the database</p>
+</blockquote>
+<p>As long as you:</p>
+<blockquote><p>Attribute: You must attribute any public use of the database, or works produced from the database, in the manner specified in the license. For any use or redistribution of the database, or works produced from it, you must make clear to others the license of the database and keep intact any notices on the original database.</p>
+</blockquote>
+</section>
+
+ </div>
+ <footer>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
+ </footer>
+</body>
+
+<script src="/assets/js/dist/index.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/about/faq/index.html b/site/public/about/faq/index.html
deleted file mode 100644
index 168abd0b..00000000
--- a/site/public/about/faq/index.html
+++ /dev/null
@@ -1,59 +0,0 @@
-<!doctype html>
-<html>
-<head>
- <title>MegaPixels</title>
- <meta charset="utf-8" />
- <meta name="author" content="Adam Harvey" />
- <meta name="description" content="MegaPixels Press and News" />
- <meta name="referrer" content="no-referrer" />
- <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
- <link rel='stylesheet' href='/assets/css/fonts.css' />
- <link rel='stylesheet' href='/assets/css/css.css' />
- <link rel='stylesheet' href='/assets/css/leaflet.css' />
- <link rel='stylesheet' href='/assets/css/applets.css' />
-</head>
-<body>
- <header>
- <a class='slogan' href="/">
- <div class='logo'></div>
- <div class='site_name'>MegaPixels</div>
-
- </a>
- <div class='links'>
- <a href="/datasets/">Datasets</a>
- <a href="/about/">About</a>
- </div>
- </header>
- <div class="content content-about">
-
- <section><h1>FAQs</h1>
-<section class="about-menu">
-<ul>
-<li><a href="/about/">About</a></li>
-<li><a class="current" href="/about/faq/">FAQs</a></li>
-<li><a href="/about/press/">Press</a></li>
-<li><a href="/about/terms/">Terms</a></li>
-<li><a href="/about/privacy/">Privacy</a></li>
-</ul>
-</section><p>[ page under development ]</p>
-</section>
-
- </div>
- <footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
- </footer>
-</body>
-
-<script src="/assets/js/dist/index.js"></script>
-</html> \ No newline at end of file
diff --git a/site/public/about/index.html b/site/public/about/index.html
index c379ec43..b83736d3 100644
--- a/site/public/about/index.html
+++ b/site/public/about/index.html
@@ -30,13 +30,13 @@
<section class="about-menu">
<ul>
<li><a class="current" href="/about/">About</a></li>
-<li><a href="/about/faq/">FAQs</a></li>
<li><a href="/about/press/">Press</a></li>
-<li><a href="/about/terms/">Terms</a></li>
-<li><a href="/about/privacy/">Privacy</a></li>
+<li><a href="/about/attribution/">Attribution</a></li>
+<li><a href="/about/legal/">Legal / Privacy</a></li>
</ul>
-</section><p>MegaPixels is an independent art and research project by Adam Harvey and Jules LaPlace investigating the ethics and individual privacy implications of publicly available face recognition datasets, and their role in industry and governmental expansion into biometric surveillance technologies.</p>
-<p>The MegaPixels site is made possible with support from <a href="http://mozilla.org">Mozilla</a></p>
+</section><p>MegaPixels is an independent art and research project by Adam Harvey and Jules LaPlace that investigates the ethics, origins, and individual privacy implications of face recognition image datasets and their role in the expansion of biometric surveillance technologies.</p>
+<p>MegaPixels is made possible with support from <a href="http://mozilla.org">Mozilla</a>, our primary funding partner.</p>
+<p>Additional support for MegaPixels is provided by the European ARTificial Intelligence Network (AI LAB) at the Ars Electronica Center, a 1-year research-in-residence grant from Karlsruhe HfG, and sales from the Privacy Gift Shop.</p>
<div class="flex-container team-photos-container">
<div class="team-member">
<h3>Adam Harvey</h3>
@@ -50,21 +50,24 @@
</p>
<p><a href="https://asdf.us/">asdf.us</a></p>
</div>
-</div><p>MegaPixels.cc is an independent research project about publicly available face recognition datasets. This website is based, in part, on earlier installations and research projects about facial recognition datasets in 2016-2018, which focused particularly on the MegaFace dataset. Since then it has evolved into a large-scale survey of publicly-available face and person analysis datasets, covering their usage, geographies, and ethics.</p>
-<p>An academic report and presentation on the findings is forthcoming. This site is published to make the research more accessible to a wider audience and to include visualizations and interactive features not possible in PDF publications. Continued research on MegaPixels is supported by a 1 year Researcher-in-Residence grant from Karlsruhe HfG.</p>
-<p>When possible, and once thoroughly verified, data generated for MegaPixels will be made available for download on <a href="https://github.com/adamhrv/megapixels">github.com/adamhrv/megapixels</a></p>
-</section><section><div class='columns columns-3'><div class='column'><h4>Team</h4>
+</div><p>The MegaPixels website is based on an <a href="https://ahprojects.com/megapixels-glassroom/">earlier installation from 2017</a> and ongoing research and lectures (<a href="https://www.youtube.com/watch?v=bfhcco9gS30">TEDx</a>, <a href="https://www.cpdpconferences.org/events/megapixels-is-in-publicly-available-facial-recognition-datasets">CPDP</a>) about facial recognition datasets. Over the last several years, this project has evolved into a large-scale interrogation of hundreds of publicly available face and person analysis datasets.</p>
+<p>MegaPixels aims to provide a critical perspective on machine learning image datasets, one that might otherwise escape academia and the industry-funded artificial intelligence think tanks that are often supported by the same technology companies that created many of the datasets presented on this site.</p>
+<p>MegaPixels is an independent project, designed as a public resource for educators, students, journalists, and researchers. Each dataset presented on this site undergoes a thorough review of its images, intent, and funding sources. Though its goals are similar to those of an academic paper, MegaPixels is a website-first research project that aligns closely with the goals of pre-print academic publications. As such, we welcome feedback and suggestions for improving this site and the clarity of the research.</p>
+<p>Because this project surfaces many funding issues with datasets (from datasets funded by the C.I.A. to those created at the National University of Defense Technology in China), it is important that we are transparent about our own funding. The original MegaPixels installation in 2017 was built as a commission for, and with support from, Tactical Technology Collective and Mozilla. The bulk of the research and web development during 2018 was supported by a grant from Mozilla. Continued development in 2019 is partially supported by a 1-year Researcher-in-Residence grant from Karlsruhe HfG, lecture and workshop fees, and commissions and sales from the Privacy Gift Shop.</p>
+<p>Please get in touch if you are interested in supporting this project.</p>
+</section><section><div class='columns columns-3'><div class='column'><h5>Team</h5>
<ul>
-<li>Adam Harvey: Concept, research, design, computer vision</li>
-<li>Jules LaPlace: Information and systems architecture, data retrieval, web applications</li>
+<li>Adam Harvey: Concept, research and analysis, design, computer vision</li>
+<li>Jules LaPlace: Information and systems architecture, data management, web applications</li>
</ul>
-</div><div class='column'><h4>Contributing Researchers</h4>
+</div><div class='column'><h5>Contributing Researchers</h5>
<ul>
-<li>Berit Gilma: Dataset statistics and discovery</li>
-<li>Beth (aka Ms. Celeb): Dataset usage verification and research</li>
-<li>Mathana Stender: Commercial usage verification and research on LFW</li>
+<li>Berit Gilma</li>
+<li>Beth (aka Ms. Celeb)</li>
+<li>Mathana Stender</li>
</ul>
-</div><div class='column'><h4>Code and Libraries</h4>
+</div><div class='column'><h5>Code and Libraries</h5>
<ul>
<li><a href="https://semanticscholar.org">Semantic Scholar</a> for citation aggregation</li>
<li>Leaflet.js for maps</li>
@@ -73,22 +76,36 @@
<li>PDFMiner.Six and Pandas for research paper data analysis</li>
</ul>
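The paper-analysis step named in the list above can be illustrated with a short sketch. The following is a minimal, hypothetical example of using PDFMiner.Six to read a paper's first page and scan its front matter for institution names; the file name, keyword list, and helper function are illustrative assumptions, not the project's actual code:
<pre>
# Sketch only: pull the front matter of a research paper and look for
# institution names. Assumes pdfminer.six is installed; "paper.pdf" and
# the keyword list are placeholders.
from pdfminer.high_level import extract_text

INSTITUTION_KEYWORDS = ["University", "Institute", "Laboratory", "Inc."]

def front_matter_institutions(pdf_path, max_chars=2000):
    # The first page usually carries the title, authors, and affiliations.
    text = extract_text(pdf_path, maxpages=1)[:max_chars]
    return [line.strip() for line in text.splitlines()
            if any(keyword in line for keyword in INSTITUTION_KEYWORDS)]

print(front_matter_institutions("paper.pdf"))
</pre>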
</div></div></section><section><p>Please direct questions, comments, or feedback to <a href="https://mastodon.social/@adamhrv">mastodon.social/@adamhrv</a></p>
-</section>
+<h4>Funding Partners</h4>
+<p>The MegaPixels website, research, and development are made possible with support from Mozilla, our primary funding partner.</p>
+<p>[ add logos ]</p>
+<p>Additional support is provided by the European ARTificial Intelligence Network (AI LAB) at the Ars Electronica Center and a 1-year research-in-residence grant from Karlsruhe HfG.</p>
+<p>[ add logos ]</p>
+<h5>Attribution</h5>
+<p>If you use MegaPixels or any data derived from it for your work, please cite our original work as follows:</p>
+<pre>
+@online{megapixels,
+ author = {Harvey, Adam and LaPlace, Jules},
+ title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
+ year = 2019,
+ url = {https://megapixels.cc/},
+ urldate = {2019-04-20}
+}
+</pre></section>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/about/legal/index.html b/site/public/about/legal/index.html
index 4e84a601..ce10014a 100644
--- a/site/public/about/legal/index.html
+++ b/site/public/about/legal/index.html
@@ -30,26 +30,44 @@
<section class="about-menu">
<ul>
<li><a href="/about/">About</a></li>
-<li><a href="/about/faq/">FAQs</a></li>
<li><a href="/about/press/">Press</a></li>
-<li><a href="/about/terms/">Terms</a></li>
-<li><a class="current" href="/about/privacy/">Privacy</a></li>
+<li><a href="/about/attribution/">Attribution</a></li>
+<li><a class="current" href="/about/legal/">Legal / Privacy</a></li>
</ul>
</section><p>MegaPixels.cc Terms and Privacy</p>
-<p>MegaPixels is an independent art and research project about the origins and ethics of publicly available face analysis image datasets. By accessing MegaPixels (the <em>Service</em> or <em>Services</em>) you agree to the terms and conditions set forth below.</p>
-<h3>Changes</h3>
-<p>We reserve the right, at our sole discretion, to modify or replace these Terms at any time. If a revision is material we will try to provide at least 30 days notice prior to any new terms taking effect. What constitutes a material change will be determined at our sole discretion.</p>
-<p>By continuing to access or use our Service after those revisions become effective, you agree to be bound by the revised terms. If you do not agree to the new terms, please stop using the Service.</p>
+<p>MegaPixels is an independent, academic art and research project about the origins and ethics of publicly available face analysis image datasets. By accessing MegaPixels (the <em>Service</em> or <em>Services</em>) you agree to the terms and conditions set forth below.</p>
<h2>Privacy</h2>
-<p>The MegaPixels site has been designed to minimize the amount of network requests to 3rd party services and therefore prioritize the privacy of the viewer by only loading local dependencies. Additionaly, this site does not use any anaytics programs to monitor site viewers. In fact, the only data collected are the necessary server logs that used only for preventing misuse, which are deleteted at regular short-term intervals.</p>
+<p>The MegaPixels site has been designed to minimize the number of network requests to 3rd party services and thereby prioritize the privacy of the viewer. This site does not use any local or external analytics programs to monitor site visitors. In fact, the only data collected are the necessary server logs used for preventing misuse, which are deleted at short-term intervals.</p>
<h2>3rd Party Services</h2>
-<p>In order to provide certain features of the site, some 3rd party services are needed. Currently, the MegaPixels.cc site uses two 3rd party services: (1) Leaflet.js for the interactive map and (2 Digital Ocean Spaces as a condent delivery network. Both services encrypt your requests to their server using HTTPS and neither service requires storing any cookies or authentication. However, both services will store files in your web browser's local cache (local storage) to improve loading performance. None of these local storage files are using for analytics, cookie-like technologies, tracking, or any similar purpose.</p>
+<p>In order to provide certain features of the site, some 3rd party services are needed. Currently, the MegaPixels.cc site uses two 3rd party services: (1) Leaflet.js for the interactive map and (2) Digital Ocean Spaces as a content delivery network. Both services encrypt your requests to their servers using HTTPS and neither service requires storing any cookies or authentication. However, both services will store files in your web browser's local cache (local storage) to improve loading performance. None of these local storage files are used for analytics, tracking, or any similar purpose.</p>
<h3>Links To Other Web Sites</h3>
-<p>The MegaPixels.cc contains many links to 3rd party websites, especically in the list of citations that are provided for each dataset. This website has no control over and assumes no responsibility for, the content, privacy policies, or practices of any third party web sites or services. You further acknowledge and agree that megapixels.cc shall not be responsible or liable, directly or indirectly, for any damage or loss caused or alleged to be caused by or in connection with use of or reliance on any such content, goods or services available on or through any such web sites or services.</p>
+<p>The MegaPixels.cc site contains many links to 3rd party websites, especially in the list of citations provided for each dataset. This website has no control over, and assumes no responsibility for, the content, privacy policies, or practices of any third party web sites or services. You acknowledge and agree that megapixels.cc shall not be responsible or liable, directly or indirectly, for any damage or loss caused or alleged to be caused by or in connection with use of or reliance on any such content, goods or services available on or through any such web sites or services.</p>
<p>We advise you to read the terms and conditions and privacy policies of any third-party web sites or services that you visit.</p>
-<h3>The Information We Provide</h3>
-<p>While every intention is made to verify and publish only verifiablenformation, at times amendments to accuracy may be required. In no event will the operators of this site be liable for your use or misuse of the information provided.</p>
-<p>We may terminate or suspend access to our Service immediately, without prior notice or liability, for any reason whatsoever, including without limitation if you breach the Terms.</p>
+<h3>Information We Collect</h3>
+<p>When you access the Service, we record your visit to the site in a server log file for the purposes of maintaining site security and preventing misuse. This includes your IP address and the header information sent by your web browser: the User Agent, referrer, and the requested page on our site.</p>
+<h3>Information We Share</h3>
+<p>We do not share or make public any information about individual site visitors, except where required by law. In any case, server logs are only retained for a limited duration.</p>
+<h3>Information We Provide</h3>
+<p>We provide information for educational, journalistic, and research purposes. The published information on MegaPixels is made available under the Open Data Commons Attribution License (<a href="https://opendatacommons.org/licenses/by/1.0/">https://opendatacommons.org/licenses/by/1.0/</a>) and for academic use only.</p>
+<p>You are free:</p>
+<blockquote><p>To Share: To copy, distribute and use the dataset
+To Create: To produce works from the dataset
+To Adapt: To modify, transform and build upon the database</p>
+</blockquote>
+<p>As long as you:</p>
+<blockquote><p>Attribute: You must attribute any public use of the database, or works produced from the database, in the manner specified in the license. For any use or redistribution of the database, or works produced from it, you must make clear to others the license of the database and keep intact any notices on the original database.</p>
+</blockquote>
+<p>If you use the MegaPixels data or any data derived from it, please cite the original work as follows:</p>
+<pre>
+@online{megapixels,
+ author = {Harvey, Adam and LaPlace, Jules},
+ title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
+ year = 2019,
+ url = {https://megapixels.cc/},
+ urldate = {2019-04-20}
+}
+</pre><p>While every effort is made to publish only verifiable information, at times information may be edited, removed, or appended for clarity or correction. In no event will the operators of this site be liable for your use or misuse of the information provided.</p>
+<p>We may terminate or suspend access to our Service immediately without prior notice or liability, for any reason whatsoever, including without limitation if you breach the Terms.</p>
<p>All provisions of the Terms which by their nature should survive termination shall survive termination, including, without limitation, ownership provisions, warranty disclaimers, indemnity and limitations of liability.</p>
<h3>Prohibited Uses</h3>
<p>You may not access or use, or attempt to access or use, the Services to take any action that could harm us or a third party. You may not use the Services in violation of applicable laws or in violation of our or any third party’s intellectual property or other proprietary or legal rights. You further agree that you shall not attempt (or encourage or support anyone else's attempt) to circumvent, reverse engineer, decrypt, or otherwise alter or interfere with the Services, or any content thereof, or make any unauthorized use thereof.</p>
@@ -57,7 +75,7 @@
<p>(i) access any part of the Services, Content, data or information you do not have permission or authorization to access;</p>
<p>(ii) use robots, spiders, scripts, service, software or any manual or automatic device, tool, or process designed to data mine or scrape the Content, data or information from the Services, or otherwise access or collect the Content, data or information from the Services using automated means;</p>
<p>(iii) use services, software or any manual or automatic device, tool, or process designed to circumvent any restriction, condition, or technological measure that controls access to the Services in any way, including overriding any security feature or bypassing or circumventing any access controls or use limits of the Services;</p>
-<p>(iv) cache or archive the Content (except for a public search engine’s use of spiders for creating search indices);</p>
+<p>(iv) cache or archive the Content (except for a public search engine’s use of spiders for creating search indices) without prior written consent;</p>
<p>(v) take action that imposes an unreasonable or disproportionately large load on our network or infrastructure; and</p>
<p>(vi) do anything that could disable, damage or change the functioning or appearance of the Services, including the presentation of advertising.</p>
<p>Engaging in a prohibited use of the Services may result in civil, criminal, and/or administrative penalties, fines, or sanctions against the user and those assisting the user.</p>
@@ -66,22 +84,23 @@
<p>Our failure to enforce any right or provision of these Terms will not be considered a waiver of those rights. If any provision of these Terms is held to be invalid or unenforceable by a court, the remaining provisions of these Terms will remain in effect. These Terms constitute the entire agreement between us regarding our Service, and supersede and replace any prior agreements we might have between us regarding the Service.</p>
<h3>Indemnity</h3>
<p>You hereby indemnify, defend and hold harmless MegaPixels (and its creators) and all officers, directors, owners, agents, information providers, affiliates, licensors and licensees (collectively, the "Indemnified Parties") from and against any and all liability and costs, including, without limitation, reasonable attorneys' fees, incurred by the Indemnified Parties in connection with any claim arising out of any breach by you or any user of your account of these Terms of Service or the foregoing representations, warranties and covenants. You shall cooperate as fully as reasonably required in the defense of any such claim. We reserve the right, at our own expense, to assume the exclusive defense and control of any matter subject to indemnification by you.</p>
+<h3>Changes</h3>
+<p>We reserve the right, at our sole discretion, to modify or replace these Terms at any time. By continuing to use or access our Service after revisions become effective, you agree to be bound by the revised terms. If you do not agree to the revised terms, please do not use the Service.</p>
</section>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/about/press/index.html b/site/public/about/press/index.html
index 610fda6e..70caf03c 100644
--- a/site/public/about/press/index.html
+++ b/site/public/about/press/index.html
@@ -30,31 +30,28 @@
<section class="about-menu">
<ul>
<li><a href="/about/">About</a></li>
-<li><a href="/about/faq/">FAQs</a></li>
<li><a class="current" href="/about/press/">Press</a></li>
-<li><a href="/about/terms/">Terms</a></li>
-<li><a href="/about/privacy/">Privacy</a></li>
+<li><a href="/about/attribution/">Attribution</a></li>
+<li><a href="/about/legal/">Legal / Privacy</a></li>
</ul>
-</section><p>(TEMPORARY PAGE)</p>
-<ul>
+</section><ul>
<li>Aug 22, 2017: "Transgender YouTubers had their videos grabbed to train facial recognition software" by James Vincent <a href="https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset">https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset</a></li>
</ul>
</section>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/about/privacy/index.html b/site/public/about/privacy/index.html
deleted file mode 100644
index 6e760cf7..00000000
--- a/site/public/about/privacy/index.html
+++ /dev/null
@@ -1,67 +0,0 @@
-<!doctype html>
-<html>
-<head>
- <title>MegaPixels</title>
- <meta charset="utf-8" />
- <meta name="author" content="Adam Harvey" />
- <meta name="description" content="MegaPixels Privacy Policy" />
- <meta name="referrer" content="no-referrer" />
- <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
- <link rel='stylesheet' href='/assets/css/fonts.css' />
- <link rel='stylesheet' href='/assets/css/tabulator.css' />
- <link rel='stylesheet' href='/assets/css/css.css' />
- <link rel='stylesheet' href='/assets/css/leaflet.css' />
- <link rel='stylesheet' href='/assets/css/applets.css' />
-</head>
-<body>
- <header>
- <a class='slogan' href="/">
- <div class='logo'></div>
- <div class='site_name'>MegaPixels</div>
- </a>
- <div class='links'>
- <a href="/datasets/">Datasets</a>
- <a href="/about/">About</a>
- </div>
- </header>
- <div class="content content-about">
-
- <section><h1>Privacy Policy</h1>
-<section class="about-menu">
-<ul>
-<li><a href="/about/">About</a></li>
-<li><a href="/about/faq/">FAQs</a></li>
-<li><a href="/about/press/">Press</a></li>
-<li><a href="/about/terms/">Terms</a></li>
-<li><a class="current" href="/about/privacy/">Privacy</a></li>
-</ul>
-</section><p>MegaPixels.cc Terms and Privacy</p>
-<p>MegaPixels is an independent art and research project about the origins and ethics of publicly available face analysis image datasets. By accessing this site you agree to the terms and conditions set forth below.</p>
-<h2>Privacy</h2>
-<p>The MegaPixels site has been designed to minimize the amount of network requests to 3rd party services and therefore prioritize the privacy of the viewer by only loading local dependencies. Additionaly, this site does not use any anaytics programs to monitor site viewers. In fact, the only data collected are the necessary server logs that used only for preventing misuse, which are deleteted at regular short-term intervals.</p>
-<h2>3rd Party Services</h2>
-<p>In order to provide certain features of the site, some 3rd party services are needed. Currently, the MegaPixels.cc site uses two 3rd party services: (1) Leaflet.js for the interactive map and (2 Digital Ocean Spaces as a condent delivery network. Both services encrypt your requests to their server using HTTPS and neither service requires storing any cookies or authentication. However, both services will store files in your web browser's local cache (local storage) to improve loading performance. None of these local storage files are using for analytics, cookie-like technologies, tracking, or any similar purpose.</p>
-<h3>Links To Other Web Sites</h3>
-<p>The MegaPixels.cc contains many links to 3rd party websites, especically in the list of citations that are provided for each dataset. This website has no control over and assumes no responsibility for, the content, privacy policies, or practices of any third party web sites or services. You further acknowledge and agree that megapixels.cc shall not be responsible or liable, directly or indirectly, for any damage or loss caused or alleged to be caused by or in connection with use of or reliance on any such content, goods or services available on or through any such web sites or services.</p>
-<p>We advise you to read the terms and conditions and privacy policies of any third-party web sites or services that you visit.</p>
-</section>
-
- </div>
- <footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
- </footer>
-</body>
-
-<script src="/assets/js/dist/index.js"></script>
-</html> \ No newline at end of file
diff --git a/site/public/about/terms/index.html b/site/public/about/terms/index.html
deleted file mode 100644
index 58e49b78..00000000
--- a/site/public/about/terms/index.html
+++ /dev/null
@@ -1,79 +0,0 @@
-<!doctype html>
-<html>
-<head>
- <title>MegaPixels</title>
- <meta charset="utf-8" />
- <meta name="author" content="Adam Harvey" />
- <meta name="description" content="MegaPixels Terms of Use and Privacy Policy" />
- <meta name="referrer" content="no-referrer" />
- <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
- <link rel='stylesheet' href='/assets/css/fonts.css' />
- <link rel='stylesheet' href='/assets/css/tabulator.css' />
- <link rel='stylesheet' href='/assets/css/css.css' />
- <link rel='stylesheet' href='/assets/css/leaflet.css' />
- <link rel='stylesheet' href='/assets/css/applets.css' />
-</head>
-<body>
- <header>
- <a class='slogan' href="/">
- <div class='logo'></div>
- <div class='site_name'>MegaPixels</div>
- </a>
- <div class='links'>
- <a href="/datasets/">Datasets</a>
- <a href="/about/">About</a>
- </div>
- </header>
- <div class="content content-about">
-
- <section><h1>Terms and Conditions ("Terms")</h1>
-<section class="about-menu">
-<ul>
-<li><a href="/about/">About</a></li>
-<li><a href="/about/faq/">FAQs</a></li>
-<li><a href="/about/press/">Press</a></li>
-<li><a class="current" href="/about/terms/">Terms</a></li>
-<li><a href="/about/privacy/">Privacy</a></li>
-</ul>
-</section><p>(TEMPORARY PAGE)</p>
-<p>(FPO: this is only example text)</p>
-<p>Last updated: December 04, 2018</p>
-<p>Please read these Terms and Conditions ("Terms", "Terms and Conditions") carefully before using the MegaPixels website (the "Service") operated by megapixels.cc ("us", "we", or "our").</p>
-<p>Your access to and use of the Service is conditioned on your acceptance of and compliance with these Terms.</p>
-<p>By accessing or using the Service you agree to be bound by these Terms. If you disagree with any part of the terms then you may not access the Service.</p>
-<h3>Links To Other Web Sites</h3>
-<p>Our Service may contain links to third-party web sites or services that are not owned or controlled by megapixels.cc.</p>
-<p>megapixels.cc has no control over, and assumes no responsibility for, the content, privacy policies, or practices of any third party web sites or services. You further acknowledge and agree that megapixels.cc shall not be responsible or liable, directly or indirectly, for any damage or loss caused or alleged to be caused by or in connection with use of or reliance on any such content, goods or services available on or through any such web sites or services.</p>
-<p>We strongly advise you to read the terms and conditions and privacy policies of any third-party web sites or services that you visit.</p>
-<h3>Termination</h3>
-<p>We may terminate or suspend access to our Service immediately, without prior notice or liability, for any reason whatsoever, including without limitation if you breach the Terms.</p>
-<p>All provisions of the Terms which by their nature should survive termination shall survive termination, including, without limitation, ownership provisions, warranty disclaimers, indemnity and limitations of liability.</p>
-<h3>Governing Law</h3>
-<p>These Terms shall be governed and construed in accordance with the laws of Berlin, Germany, without regard to its conflict of law provisions.</p>
-<p>Our failure to enforce any right or provision of these Terms will not be considered a waiver of those rights. If any provision of these Terms is held to be invalid or unenforceable by a court, the remaining provisions of these Terms will remain in effect. These Terms constitute the entire agreement between us regarding our Service, and supersede and replace any prior agreements we might have between us regarding the Service.</p>
-<h3>Changes</h3>
-<p>We reserve the right, at our sole discretion, to modify or replace these Terms at any time. If a revision is material we will try to provide at least 30 days notice prior to any new terms taking effect. What constitutes a material change will be determined at our sole discretion.</p>
-<p>By continuing to access or use our Service after those revisions become effective, you agree to be bound by the revised terms. If you do not agree to the new terms, please stop using the Service.</p>
-<h3>Contact Us</h3>
-<p>If you have any questions about these Terms, please contact us.</p>
-</section>
-
- </div>
- <footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
- </footer>
-</body>
-
-<script src="/assets/js/dist/index.js"></script>
-</html> \ No newline at end of file
diff --git a/site/public/datasets/50_people_one_question/index.html b/site/public/datasets/50_people_one_question/index.html
index 796af8d6..79411122 100644
--- a/site/public/datasets/50_people_one_question/index.html
+++ b/site/public/datasets/50_people_one_question/index.html
@@ -17,7 +17,7 @@
<a class='slogan' href="/">
<div class='logo'></div>
<div class='site_name'>MegaPixels</div>
- <div class='splash'>50 People One Question</div>
+ <div class='splash'>50 People One Question Dataset</div>
</a>
<div class='links'>
<a href="/datasets/">Datasets</a>
@@ -27,7 +27,8 @@
<div class="content content-dataset">
<section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/50_people_one_question/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'><span style="color:#ffaa00">People One Question</span> is a dataset of people from an online video series on YouTube and Vimeo used for building facial recognition algorithms</span></div><div class='hero_subdesc'><span class='bgpad'>People One Question dataset includes ...
-</span></div></div></section><section><div class='left-sidebar'><div class='meta'>
+</span></div></div></section><section><h2>50 People 1 Question</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
<div class='gray'>Published</div>
<div>2013</div>
</div><div class='meta'>
@@ -35,36 +36,42 @@
<div>33 </div>
</div><div class='meta'>
<div class='gray'>Purpose</div>
- <div>Facial landmark estimation in the wild</div>
+ <div>Facial landmark estimation</div>
</div><div class='meta'>
<div class='gray'>Website</div>
<div><a href='http://www.vision.caltech.edu/~dhall/projects/MergingPoseEstimates/' target='_blank' rel='nofollow noopener'>caltech.edu</a></div>
- </div><div class='meta'><div><div class='gray'>Collected</div><div>TBD</div></div><div><div class='gray'>Published</div><div>TBD</div></div><div><div class='gray'>Images</div><div>TBD</div></div><div><div class='gray'>Faces</div><div>TBD</div></div></div></div><h2>50 People 1 Question</h2>
-<p>(PAGE UNDER DEVELOPMENT)</p>
-<p>At vero eos et accusamus et iusto odio dignissimos ducimus, qui blanditiis praesentium voluptatum deleniti atque corrupti, quos dolores et quas molestias excepturi sint, obcaecati cupiditate non-provident, similique sunt in culpa, qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio.</p>
-<p>Nam libero tempore, cum soluta nobis est eligendi optio, cumque nihil impedit, quo minus id, quod maxime placeat, facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet, ut et voluptates repudiandae sint et molestiae non-recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat</p>
+ </div></div><p>[ page under development ]</p>
</section><section>
+ <h3>Who used 50 People One Question Dataset?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
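As a sketch of the aggregation behind this chart: per-country, per-year citation totals, keeping only the top 10 countries. The CSV file and its country/year columns are a hypothetical schema, not the site's actual data format:
<pre>
# Sketch only: rank countries by verified dataset citations and tabulate
# yearly totals. "citations.csv" with country/year columns is hypothetical.
import pandas as pd

citations = pd.read_csv("citations.csv")  # columns: country, year

top10 = citations["country"].value_counts().head(10).index
yearly = (citations[citations["country"].isin(top10)]
          .groupby(["country", "year"]).size()
          .unstack(fill_value=0))

print(yearly)  # rows: countries, columns: years, values: citation counts
</pre>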
+
+<section class="applet_container">
+<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span>
+</div> -->
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
<h3>Biometric Trade Routes</h3>
-<!--
- <div class="map-sidebar right-sidebar">
- <h3>Legend</h3>
- <ul>
- <li><span style="color: #f2f293">&#9632;</span> Industry</li>
- <li><span style="color: #f30000">&#9632;</span> Academic</li>
- <li><span style="color: #3264f6">&#9632;</span> Government</li>
- </ul>
- </div>
- -->
+
<p>
- To help understand how 50 People One Question Dataset has been used around the world for commercial, military and academic research; publicly available research citing 50 People One Question is collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal reserach projects at that location.
+ To help understand how 50 People One Question Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing 50 People One Question was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.
</p>
</section>
<section class="applet_container fullwidth">
<div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
-
</section>
<div class="caption">
@@ -72,25 +79,12 @@
<li class="edu">Academic</li>
<li class="com">Commercial</li>
<li class="gov">Military / Government</li>
- <li class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</li>
</ul>
+ <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</div >
</div>
-<!-- <section>
- <p class='subp'>
- [section under development] 50 People One Question Dataset ... Standardized paragraph of text about the map. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo.
- </p>
-</section>
- --><section>
- <div class="hr-wave-holder">
- <div class="hr-wave-line hr-wave-line1"></div>
- <div class="hr-wave-line hr-wave-line2"></div>
- </div>
-
- <h3>Supplementary Information</h3>
-
-</section><section class="applet_container">
+<section class="applet_container">
<h3>Dataset Citations</h3>
<p>
@@ -102,18 +96,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/afad/index.html b/site/public/datasets/afad/index.html
index ac025a80..7969c1d6 100644
--- a/site/public/datasets/afad/index.html
+++ b/site/public/datasets/afad/index.html
@@ -17,7 +17,7 @@
<a class='slogan' href="/">
<div class='logo'></div>
<div class='site_name'>MegaPixels</div>
- <div class='splash'>AFAD</div>
+ <div class='splash'>Asian Face Age Dataset</div>
</a>
<div class='links'>
<a href="/datasets/">Datasets</a>
@@ -26,8 +26,75 @@
</header>
<div class="content content-">
- <section><h1>Asian Face Age Dataset</h1>
-</section><section><div class='meta'><div><div class='gray'>Years</div><div>2016?</div></div><div><div class='gray'>Images</div><div>164,432</div></div><div><div class='gray'>Identities</div><div>4,362</div></div><div><div class='gray'>Origin</div><div>RenRen</div></div></div><section><section><h2>Research</h2>
+ <section><h2>Asian Face Age Dataset</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
+ <div class='gray'>Published</div>
+ <div>2017</div>
+ </div><div class='meta'>
+ <div class='gray'>Images</div>
+ <div>164,432 </div>
+ </div><div class='meta'>
+ <div class='gray'>Purpose</div>
+ <div>Age estimation on Asian faces</div>
+ </div><div class='meta'>
+ <div class='gray'>Funded by</div>
+ <div>NSFC, the Fundamental Research Funds for the Central Universities, the Program for Changjiang Scholars and Innovative Research Team in University of China, the Shaanxi Innovative Research Team for Key Science and Technology, and China Postdoctoral Science Foundation</div>
+ </div><div class='meta'>
+ <div class='gray'>Website</div>
+ <div><a href='https://afad-dataset.github.io/' target='_blank' rel='nofollow noopener'>github.io</a></div>
+ </div></div><p>[ page under development ]</p>
+</section><section>
+ <h3>Who used Asian Face Age Dataset?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span>
+</div> -->
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
+
+ <h3>Biometric Trade Routes</h3>
+
+ <p>
+ To help understand how Asian Face Age Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Asian Face Age Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.
+ </p>
+
+ </section>
+
+<section class="applet_container fullwidth">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
+</section>
+
+<div class="caption">
+ <ul class="map-legend">
+ <li class="edu">Academic</li>
+ <li class="com">Commercial</li>
+ <li class="gov">Military / Government</li>
+ </ul>
+ <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</div >
+</div>
+
+
+<section class="applet_container">
+
+ <h3>Dataset Citations</h3>
+ <p>
+ The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
+ </p>
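A hedged sketch of that collection step: fetching one paper's citations from Semantic Scholar's public API and geocoding an institution string with geopy's Nominatim. The paper ID and institution name are placeholders, and the v1 endpoint and response fields are stated here as assumptions rather than the project's actual pipeline:
<pre>
# Sketch only: fetch citations for one paper, then geocode an institution
# string taken from a paper's front matter. IDs and names are placeholders.
import requests
from geopy.geocoders import Nominatim

paper_id = "arXiv:1512.02325"  # placeholder paper ID
resp = requests.get("https://api.semanticscholar.org/v1/paper/" + paper_id)
resp.raise_for_status()
citing_titles = [c["title"] for c in resp.json().get("citations", [])]

geocoder = Nominatim(user_agent="megapixels-citation-sketch")
location = geocoder.geocode("Stanford University")  # placeholder institution
if location:
    print(len(citing_titles), "citations;",
          (location.latitude, location.longitude))
</pre>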
+
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
+</section><section><h2>(ignore) research notes</h2>
<blockquote><p>The Asian Face Age Dataset (AFAD) is a new dataset proposed for evaluating the performance of age estimation, which contains more than 160K facial images and the corresponding age and gender labels. This dataset is oriented to age estimation on Asian faces, so all the facial images are for Asian faces. It is noted that the AFAD is the biggest dataset for age estimation to date. It is well suited to evaluate how deep learning methods can be adopted for age estimation.
Motivation</p>
<p>For age estimation, there are several public datasets for evaluating the performance of a specific algorithm, such as FG-NET [1] (1002 face images), MORPH I (1690 face images), and MORPH II[2] (55,608 face images). Among them, the MORPH II is the biggest public dataset to date. On the other hand, as we know it is necessary to collect a large scale dataset to train a deep Convolutional Neural Network. Therefore, the MORPH II dataset is extensively used to evaluate how deep learning methods can be adopted for age estimation [3][4].</p>
@@ -42,18 +109,17 @@ Motivation</p>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/aflw/index.html b/site/public/datasets/aflw/index.html
deleted file mode 100644
index 476f390c..00000000
--- a/site/public/datasets/aflw/index.html
+++ /dev/null
@@ -1,54 +0,0 @@
-<!doctype html>
-<html>
-<head>
- <title>MegaPixels</title>
- <meta charset="utf-8" />
- <meta name="author" content="Adam Harvey" />
- <meta name="description" content="AFLW: Annotated Facial Landmarks in The Wild" />
- <meta name="referrer" content="no-referrer" />
- <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
- <link rel='stylesheet' href='/assets/css/fonts.css' />
- <link rel='stylesheet' href='/assets/css/css.css' />
- <link rel='stylesheet' href='/assets/css/leaflet.css' />
- <link rel='stylesheet' href='/assets/css/applets.css' />
-</head>
-<body>
- <header>
- <a class='slogan' href="/">
- <div class='logo'></div>
- <div class='site_name'>MegaPixels</div>
-
- </a>
- <div class='links'>
- <a href="/datasets/">Datasets</a>
- <a href="/about/">About</a>
- </div>
- </header>
- <div class="content content-">
-
- <section><h1>Annotated Facial Landmarks in The Wild</h1>
-</section><section><div class='meta'><div><div class='gray'>Years</div><div>1993-1996</div></div><div><div class='gray'>Images</div><div>25,993</div></div><div><div class='gray'>Identities</div><div>1,199 </div></div><div><div class='gray'>Origin</div><div>Flickr</div></div></div><section><section><!--header--></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/aflw/aflw_index.gif' alt=''></div></section><section><p>RESEARCH below this line</p>
-<blockquote><p>The motivation for the AFLW database is the need for a large-scale, multi-view, real-world face database with annotated facial features. We gathered the images on Flickr using a wide range of face relevant tags (e.g., face, mugshot, profile face). The downloaded set of images was manually scanned for images containing faces. The key data and most important properties of the database are:</p>
-</blockquote>
-<p><a href="https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/">https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/</a></p>
-</section>
-
- </div>
- <footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
- </footer>
-</body>
-
-<script src="/assets/js/dist/index.js"></script>
-</html> \ No newline at end of file
diff --git a/site/public/datasets/brainwash/index.html b/site/public/datasets/brainwash/index.html
index ec5ee434..0f782924 100644
--- a/site/public/datasets/brainwash/index.html
+++ b/site/public/datasets/brainwash/index.html
@@ -17,7 +17,7 @@
<a class='slogan' href="/">
<div class='logo'></div>
<div class='site_name'>MegaPixels</div>
- <div class='splash'>Brainwash</div>
+ <div class='splash'>Brainwash Dataset</div>
</a>
<div class='links'>
<a href="/datasets/">Datasets</a>
@@ -27,7 +27,8 @@
<div class="content content-dataset">
<section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/brainwash/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>Brainwash is a dataset of webcam images taken from the Brainwash Cafe in San Francisco in 2014</span></div><div class='hero_subdesc'><span class='bgpad'>The Brainwash dataset includes 11,918 images of "everyday life of a busy downtown cafe" and is used for training head detection surveillance algorithms
-</span></div></div></section><section><div class='left-sidebar'><div class='meta'>
+</span></div></div></section><section><h2>Brainwash Dataset</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
<div class='gray'>Published</div>
<div>2015</div>
</div><div class='meta'>
@@ -48,11 +49,10 @@
</div><div class='meta'>
<div class='gray'>Website</div>
<div><a href='https://purl.stanford.edu/sx925dc9385' target='_blank' rel='nofollow noopener'>stanford.edu</a></div>
- </div></div><h2>Brainwash Dataset</h2>
-<p><em>Brainwash</em> is a head detection dataset created from San Francisco's Brainwash Cafe livecam footage. It includes 11,918 images of "everyday life of a busy downtown cafe"<a class="footnote_shim" name="[^readme]_1"> </a><a href="#[^readme]" class="footnote" title="Footnote 1">1</a> captured at 100 second intervals throught the entire day. Brainwash dataset was captured during 3 days in 2014: October 27, November 13, and November 24. According the author's reserach paper introducing the dataset, the images were acquired with the help of Angelcam.com [cite orig paper].</p>
-<p>Brainwash is not a widely used dataset but since its publication by Stanford University in 2015, it has notably appeared in several research papers from the National University of Defense Technology in Changsha, China. In 2016 and in 2017 researchers there conducted studies on detecting people's heads in crowded scenes for the purpose of surveillance <a class="footnote_shim" name="[^localized_region_context]_1"> </a><a href="#[^localized_region_context]" class="footnote" title="Footnote 2">2</a> <a class="footnote_shim" name="[^replacement_algorithm]_1"> </a><a href="#[^replacement_algorithm]" class="footnote" title="Footnote 3">3</a>.</p>
-<p>If you happen to have been at Brainwash cafe in San Franscisco at any time on October 26, November 13, or November 24 in 2014 you are most likely included in the Brainwash dataset.</p>
-</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/brainwash/assets/brainwash_mean_overlay.jpg' alt=' The pixel-averaged image of all Brainwash dataset images is shown with 81,973 head annotations drawn from the Brainwash training partition. (c) Adam Harvey'><div class='caption'> The pixel-averaged image of all Brainwash dataset images is shown with 81,973 head annotations drawn from the Brainwash training partition. (c) Adam Harvey</div></div></section><section>
+  </div></div><p>Brainwash is a dataset of livecam images taken from San Francisco's Brainwash Cafe. It includes 11,918 images of "everyday life of a busy downtown cafe"<a class="footnote_shim" name="[^readme]_1"> </a><a href="#[^readme]" class="footnote" title="Footnote 1">1</a> captured at 100-second intervals throughout the entire day. The Brainwash dataset includes 3 full days of webcam images taken on October 27, November 13, and November 24 in 2014. According to the author's <a href="https://www.semanticscholar.org/paper/End-to-End-People-Detection-in-Crowded-Scenes-Stewart-Andriluka/1bd1645a629f1b612960ab9bba276afd4cf7c666">research paper</a> introducing the dataset, the images were acquired with the help of Angelcam.com.<a class="footnote_shim" name="[^end_to_end]_1"> </a><a href="#[^end_to_end]" class="footnote" title="Footnote 2">2</a></p>
+<p>The Brainwash dataset is unique because it uses images from a publicly available webcam that records people inside a privately owned business without any consent. No ordinary cafe customer could ever suspect their image would end up in a dataset used for surveillance research and development, but that is exactly what happened to customers at Brainwash cafe in San Francisco.</p>
+<p>Although Brainwash appears to be a less popular dataset, researchers from the National University of Defense Technology in China took note of it and used it in 2016 and 2017 for two <a href="https://www.semanticscholar.org/paper/Localized-region-context-and-object-feature-fusion-Li-Dou/b02d31c640b0a31fb18c4f170d841d8e21ffb66c">research</a> <a href="https://www.semanticscholar.org/paper/A-Replacement-Algorithm-of-Non-Maximum-Suppression-Zhao-Wang/591a4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b">projects</a> on advancing the capabilities of object detection to more accurately isolate the target region in an image (<a href="https://www.itm-conferences.org/articles/itmconf/pdf/2017/04/itmconf_ita2017_05006.pdf">PDF</a>).<a class="footnote_shim" name="[^localized_region_context]_1"> </a><a href="#[^localized_region_context]" class="footnote" title="Footnote 3">3</a> <a class="footnote_shim" name="[^replacement_algorithm]_1"> </a><a href="#[^replacement_algorithm]" class="footnote" title="Footnote 4">4</a> The dataset also appears in a 2017 <a href="https://ieeexplore.ieee.org/document/7877809">research paper</a> from Peking University aimed at improving surveillance capabilities for "people detection in the crowded scenes".</p>
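+<p>The annotation visualizations shown below aggregate every frame and head box in the dataset. A minimal sketch of the underlying idea (assuming frames readable with Pillow/NumPy and annotations given as pixel boxes; the dataset's actual file format is not reproduced here):</p>
+<pre>
+# Sketch: pixel-average all frames and accumulate a head-annotation
+# density ("saliency") map from (x0, y0, x1, y1) boxes.
+import numpy as np
+from PIL import Image
+
+def mean_image(paths):
+    acc = None
+    for p in paths:
+        frame = np.asarray(Image.open(p).convert("RGB"), dtype=np.float64)
+        acc = frame if acc is None else acc + frame
+    return (acc / len(paths)).astype(np.uint8)
+
+def annotation_density(boxes, height, width):
+    heat = np.zeros((height, width), dtype=np.float64)
+    for x0, y0, x1, y1 in boxes:
+        heat[int(y0):int(y1), int(x0):int(x1)] += 1.0  # one vote per head box
+    return heat / heat.max()  # normalize to [0, 1] for display
+</pre>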
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/brainwash/assets/brainwash_grid.jpg' alt=' A visualization of 81,973 head annotations from the Brainwash dataset training partition. &copy; megapixels.cc'><div class='caption'> A visualization of 81,973 head annotations from the Brainwash dataset training partition. &copy; megapixels.cc</div></div></section><section>
<h3>Who used Brainwash Dataset?</h3>
<p>
@@ -65,30 +65,24 @@
<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span>
</div> -->
<div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
-</section><section class="applet_container">
+</section>
+
+<section class="applet_container">
<div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
-</section><section>
+</section>
+
+<section>
<h3>Biometric Trade Routes</h3>
-<!--
- <div class="map-sidebar right-sidebar">
- <h3>Legend</h3>
- <ul>
- <li><span style="color: #f2f293">&#9632;</span> Industry</li>
- <li><span style="color: #f30000">&#9632;</span> Academic</li>
- <li><span style="color: #3264f6">&#9632;</span> Government</li>
- </ul>
- </div>
- -->
+
<p>
- To help understand how Brainwash Dataset has been used around the world for commercial, military and academic research; publicly available research citing Brainwash Dataset is collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal reserach projects at that location.
+    To help understand how Brainwash Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Brainwash Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.
</p>
</section>
<section class="applet_container fullwidth">
<div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
-
</section>
<div class="caption">
@@ -96,16 +90,12 @@
<li class="edu">Academic</li>
<li class="com">Commercial</li>
<li class="gov">Military / Government</li>
- <li class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</li>
</ul>
+  <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a>, and then dataset usage is verified and geolocated.</div>
</div>
-<!-- <section>
- <p class='subp'>
- [section under development] Brainwash Dataset ... Standardized paragraph of text about the map. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo.
- </p>
-</section>
- --><section class="applet_container">
+
+<section class="applet_container">
<h3>Dataset Citations</h3>
<p>
@@ -120,38 +110,44 @@
<div class="hr-wave-line hr-wave-line2"></div>
</div>
- <h3>Supplementary Information</h3>
+ <h2>Supplementary Information</h2>
-</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/brainwash/assets/00425000_960.jpg' alt=' An sample image from the Brainwash dataset used for training face and head detection algorithms for surveillance. The datset contains about 12,000 images. License: Open Data Commons Public Domain Dedication (PDDL)'><div class='caption'> An sample image from the Brainwash dataset used for training face and head detection algorithms for surveillance. The datset contains about 12,000 images. License: Open Data Commons Public Domain Dedication (PDDL)</div></div></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/brainwash/assets/brainwash_montage.jpg' alt=' 49 of the 11,918 images included in the Brainwash dataset. License: Open Data Commons Public Domain Dedication (PDDL)'><div class='caption'> 49 of the 11,918 images included in the Brainwash dataset. License: Open Data Commons Public Domain Dedication (PDDL)</div></div></section><section><h4>Additional Resources</h4>
-<ul>
-<li>The dataset author spoke about his research at the CVPR conference in 2016 <a href="https://www.youtube.com/watch?v=Nl2fBKxwusQ">https://www.youtube.com/watch?v=Nl2fBKxwusQ</a></li>
-</ul>
-<p>TODO</p>
-<ul>
-<li>add bounding boxes to the header image</li>
-<li>remake montage with randomized images, with bboxes</li>
-<li>clean up intro text</li>
-<li>verify quote citations</li>
-</ul>
-</section><section><ul class="footnotes"><li><a name="[^readme]" class="footnote_shim"></a><span class="backlinks"><a href="#[^readme]_1">a</a></span><p>"readme.txt" <a href="https://exhibits.stanford.edu/data/catalog/sx925dc9385">https://exhibits.stanford.edu/data/catalog/sx925dc9385</a>.</p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/brainwash/assets/brainwash_example.jpg' alt=' A sample image from the Brainwash dataset used for training face and head detection algorithms for surveillance. The dataset contains about 12,000 images. License: Open Data Commons Public Domain Dedication (PDDL)'><div class='caption'> A sample image from the Brainwash dataset used for training face and head detection algorithms for surveillance. The dataset contains about 12,000 images. License: Open Data Commons Public Domain Dedication (PDDL)</div></div></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/brainwash/assets/brainwash_saliency_map.jpg' alt=' A visualization of 81,973 head annotations from the Brainwash dataset training partition. &copy; megapixels.cc'><div class='caption'> A visualization of 81,973 head annotations from the Brainwash dataset training partition. &copy; megapixels.cc</div></div></section><section>
+
+ <h4>Cite Our Work</h4>
+ <p>
+
+    If you use our data, research, or graphics, please cite our work:
+
+<pre id="cite-bibtex">
+@online{megapixels,
+  author = {Harvey, Adam and LaPlace, Jules},
+ title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
+ year = 2019,
+ url = {https://megapixels.cc/},
+ urldate = {2019-04-18}
+}</pre>
+
+ </p>
+</section><section><h3>References</h3><section><ul class="footnotes"><li><a name="[^readme]" class="footnote_shim"></a><span class="backlinks"><a href="#[^readme]_1">a</a></span><p>"readme.txt" <a href="https://exhibits.stanford.edu/data/catalog/sx925dc9385">https://exhibits.stanford.edu/data/catalog/sx925dc9385</a>.</p>
+</li><li><a name="[^end_to_end]" class="footnote_shim"></a><span class="backlinks"><a href="#[^end_to_end]_1">a</a></span><p>Stewart, Russel. Andriluka, Mykhaylo. "End-to-end people detection in crowded scenes". 2016.</p>
</li><li><a name="[^localized_region_context]" class="footnote_shim"></a><span class="backlinks"><a href="#[^localized_region_context]_1">a</a></span><p>Li, Y. and Dou, Y. and Liu, X. and Li, T. Localized Region Context and Object Feature Fusion for People Head Detection. ICIP16 Proceedings. 2016. Pages 594-598.</p>
</li><li><a name="[^replacement_algorithm]" class="footnote_shim"></a><span class="backlinks"><a href="#[^replacement_algorithm]_1">a</a></span><p>Zhao. X, Wang Y, Dou, Y. A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering.</p>
-</li></ul></section>
+</li></ul></section></section>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/caltech_10k/index.html b/site/public/datasets/caltech_10k/index.html
index 9aa0b2c3..abb55148 100644
--- a/site/public/datasets/caltech_10k/index.html
+++ b/site/public/datasets/caltech_10k/index.html
@@ -17,7 +17,7 @@
<a class='slogan' href="/">
<div class='logo'></div>
<div class='site_name'>MegaPixels</div>
-
+    <div class='splash'>Caltech 10K Faces Dataset</div>
</a>
<div class='links'>
<a href="/datasets/">Datasets</a>
@@ -26,23 +26,97 @@
</header>
<div class="content content-">
- <section><h1>Caltech 10K Faces Dataset</h1>
-</section><section><div class='meta'><div><div class='gray'>Years</div><div>TBD</div></div><div><div class='gray'>Images</div><div>TBD</div></div><div><div class='gray'>Identities</div><div>TBD</div></div><div><div class='gray'>Origin</div><div>Google Search</div></div><div><div class='gray'>Funding</div><div>TBD</div></div></div><section>
+ <section><h2>Caltech 10K Faces Dataset</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
+    <div class='gray'>Published</div>
+    <div>TBD</div>
+  </div><div class='meta'>
+    <div class='gray'>Faces</div>
+    <div>10,524 </div>
+  </div><div class='meta'>
+    <div class='gray'>Purpose</div>
+    <div>Face detection</div>
+  </div><div class='meta'>
+    <div class='gray'>Created by</div>
+    <div>California Institute of Technology (US)</div>
+  </div><div class='meta'>
+    <div class='gray'>Origin</div>
+    <div>Google Image Search</div>
+  </div><div class='meta'>
+    <div class='gray'>Funded by</div>
+    <div>TBD</div>
+  </div><div class='meta'>
+    <div class='gray'>Website</div>
+    <div>TBD</div>
+  </div></div><p>[ page under development ]</p>
+</section><section>
+  <h3>Who used Caltech 10K Faces Dataset?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span>
+</div> -->
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
+
+ <h3>Biometric Trade Routes</h3>
+
+ <p>
+    To help understand how Caltech 10K Faces Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Caltech 10K Faces Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.
+ </p>
+
+ </section>
+
+<section class="applet_container fullwidth">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
+</section>
+
+<div class="caption">
+ <ul class="map-legend">
+ <li class="edu">Academic</li>
+ <li class="com">Commercial</li>
+ <li class="gov">Military / Government</li>
+ </ul>
+  <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a>, and then dataset usage is verified and geolocated.</div>
+</div>
+
+
+<section class="applet_container">
+
+ <h3>Dataset Citations</h3>
+ <p>
+ The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
+ </p>
+
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
+</section><section><h3>(ignore) research notes</h3>
+<p>The dataset contains images of people collected from the web by typing common given names into Google Image Search. The coordinates of the eyes, the nose and the center of the mouth for each frontal face are provided in a ground truth file. This information can be used to align and crop the human faces or as a ground truth for a face detection algorithm. The dataset has 10,524 human faces of various resolutions and in different settings, e.g. portrait images, groups of people, etc. Profile faces or very low resolution faces are not labeled.</p>
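+<p>A minimal sketch of how such landmark ground truth can be used to align and crop a face (assuming Pillow and eye coordinates in pixels; the ground truth file's actual layout is not reproduced here):</p>
+<pre>
+# Sketch: rotate so the eyes are level, then crop around their midpoint.
+import math
+from PIL import Image
+
+def align_face(img, left_eye, right_eye, crop_size=128):
+    (lx, ly), (rx, ry) = left_eye, right_eye
+    angle = math.degrees(math.atan2(ry - ly, rx - lx))  # eye-line tilt
+    cx, cy = (lx + rx) / 2.0, (ly + ry) / 2.0           # midpoint between eyes
+    # level the eye line (rotation sign depends on the y-axis convention)
+    img = img.rotate(angle, center=(cx, cy))
+    half = crop_size // 2
+    return img.crop((int(cx) - half, int(cy) - half,
+                     int(cx) + half, int(cy) + half))
+</pre>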
+</section>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/celeba/index.html b/site/public/datasets/celeba/index.html
index 26a43803..a4a7efa2 100644
--- a/site/public/datasets/celeba/index.html
+++ b/site/public/datasets/celeba/index.html
@@ -17,7 +17,7 @@
<a class='slogan' href="/">
<div class='logo'></div>
<div class='site_name'>MegaPixels</div>
- <div class='splash'>CelebA</div>
+ <div class='splash'>CelebA Dataset</div>
</a>
<div class='links'>
<a href="/datasets/">Datasets</a>
@@ -27,7 +27,8 @@
<div class="content content-dataset">
<section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/celeba/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'><span style="color:#ffaa00">CelebA</span> is a dataset of people...</span></div><div class='hero_subdesc'><span class='bgpad'>CelebA includes...
-</span></div></div></section><section><div class='left-sidebar'><div class='meta'>
+</span></div></div></section><section><h2>CelebA Dataset</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
<div class='gray'>Published</div>
<div>2015</div>
</div><div class='meta'>
@@ -45,32 +46,38 @@
</div><div class='meta'>
<div class='gray'>Website</div>
<div><a href='http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html' target='_blank' rel='nofollow noopener'>edu.hk</a></div>
- </div><div class='meta'><div><div class='gray'>Collected</div><div>TBD</div></div><div><div class='gray'>Published</div><div>TBD</div></div><div><div class='gray'>Images</div><div>TBD</div></div><div><div class='gray'>Faces</div><div>TBD</div></div></div></div><h2>CelebA</h2>
-<p>(PAGE UNDER DEVELOPMENT)</p>
-<p>At vero eos et accusamus et iusto odio dignissimos ducimus, qui blanditiis praesentium voluptatum deleniti atque corrupti, quos dolores et quas molestias excepturi sint, obcaecati cupiditate non-provident, similique sunt in culpa, qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio.</p>
-<p>Nam libero tempore, cum soluta nobis est eligendi optio, cumque nihil impedit, quo minus id, quod maxime placeat, facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet, ut et voluptates repudiandae sint et molestiae non-recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat</p>
+ </div></div><p>[ PAGE UNDER DEVELOPMENT ]</p>
</section><section>
+ <h3>Who used CelebA Dataset?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span>
+</div> -->
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
<h3>Biometric Trade Routes</h3>
-<!--
- <div class="map-sidebar right-sidebar">
- <h3>Legend</h3>
- <ul>
- <li><span style="color: #f2f293">&#9632;</span> Industry</li>
- <li><span style="color: #f30000">&#9632;</span> Academic</li>
- <li><span style="color: #3264f6">&#9632;</span> Government</li>
- </ul>
- </div>
- -->
+
<p>
- To help understand how CelebA Dataset has been used around the world for commercial, military and academic research; publicly available research citing Large-scale CelebFaces Attributes Dataset is collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal reserach projects at that location.
+    To help understand how CelebA Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Large-scale CelebFaces Attributes Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.
</p>
</section>
<section class="applet_container fullwidth">
<div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
-
</section>
<div class="caption">
@@ -78,25 +85,12 @@
<li class="edu">Academic</li>
<li class="com">Commercial</li>
<li class="gov">Military / Government</li>
- <li class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</li>
</ul>
+  <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a>, and then dataset usage is verified and geolocated.</div>
</div>
-<!-- <section>
- <p class='subp'>
- [section under development] CelebA Dataset ... Standardized paragraph of text about the map. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo.
- </p>
-</section>
- --><section>
- <div class="hr-wave-holder">
- <div class="hr-wave-line hr-wave-line1"></div>
- <div class="hr-wave-line hr-wave-line2"></div>
- </div>
-
- <h3>Supplementary Information</h3>
-
-</section><section class="applet_container">
+<section class="applet_container">
<h3>Dataset Citations</h3>
<p>
@@ -114,18 +108,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/cofw/index.html b/site/public/datasets/cofw/index.html
index 8925d4b8..c6d7417e 100644
--- a/site/public/datasets/cofw/index.html
+++ b/site/public/datasets/cofw/index.html
@@ -17,7 +17,7 @@
<a class='slogan' href="/">
<div class='logo'></div>
<div class='site_name'>MegaPixels</div>
- <div class='splash'>COFW</div>
+ <div class='splash'>COFW Dataset</div>
</a>
<div class='links'>
<a href="/datasets/">Datasets</a>
@@ -26,7 +26,8 @@
</header>
<div class="content content-">
- <section><div class='left-sidebar'><div class='meta'>
+ <section><h2>Caltech Occluded Faces in the Wild</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
<div class='gray'>Published</div>
<div>2013</div>
</div><div class='meta'>
@@ -38,10 +39,60 @@
</div><div class='meta'>
<div class='gray'>Website</div>
<div><a href='http://www.vision.caltech.edu/xpburgos/ICCV13/' target='_blank' rel='nofollow noopener'>caltech.edu</a></div>
- </div><div class='meta'><div><div class='gray'>Years</div><div>1993-1996</div></div><div><div class='gray'>Images</div><div>14,126</div></div><div><div class='gray'>Identities</div><div>1,199 </div></div><div><div class='gray'>Origin</div><div>Web Searches</div></div><div><div class='gray'>Funded by</div><div>ODNI, IARPA, Microsoft</div></div></div></div><h2>Caltech Occluded Faces in the Wild</h2>
-<p>(PAGE UNDER DEVELOPMENT)</p>
-<p>COFW is "is designed to benchmark face landmark algorithms in realistic conditions, which include heavy occlusions and large shape variations" [Robust face landmark estimation under occlusion].</p>
-<p>RESEARCH below this line</p>
+ </div></div><p>[ PAGE UNDER DEVELOPMENT ]</p>
+</section><section>
+ <h3>Who used COFW Dataset?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span>
+</div> -->
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
+
+ <h3>Biometric Trade Routes</h3>
+
+ <p>
+    To help understand how COFW Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Caltech Occluded Faces in the Wild was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.
+ </p>
+
+ </section>
+
+<section class="applet_container fullwidth">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
+</section>
+
+<div class="caption">
+ <ul class="map-legend">
+ <li class="edu">Academic</li>
+ <li class="com">Commercial</li>
+ <li class="gov">Military / Government</li>
+ </ul>
+  <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a>, and then dataset usage is verified and geolocated.</div>
+</div>
+
+
+<section class="applet_container">
+
+ <h3>Dataset Citations</h3>
+ <p>
+ The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
+ </p>
+
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
+</section><section><h3>(ignore) research notes</h3>
+</section><section><div class='meta'><div><div class='gray'>Years</div><div>1993-1996</div></div><div><div class='gray'>Images</div><div>14,126</div></div><div><div class='gray'>Identities</div><div>1,199 </div></div><div><div class='gray'>Origin</div><div>Web Searches</div></div><div><div class='gray'>Funded by</div><div>ODNI, IARPA, Microsoft</div></div></div><section><section><p>COFW "is designed to benchmark face landmark algorithms in realistic conditions, which include heavy occlusions and large shape variations" [Robust face landmark estimation under occlusion].</p>
<blockquote><p>We asked four people with different levels of computer vision knowledge to each collect 250 faces representative of typical real-world images, with the clear goal of challenging computer vision methods.
The result is 1,007 images of faces obtained from a variety of sources.</p>
</blockquote>
@@ -56,25 +107,15 @@ To increase the number of training images, and since COFW has the exact same la
</section><section>
<h3>Biometric Trade Routes</h3>
-<!--
- <div class="map-sidebar right-sidebar">
- <h3>Legend</h3>
- <ul>
- <li><span style="color: #f2f293">&#9632;</span> Industry</li>
- <li><span style="color: #f30000">&#9632;</span> Academic</li>
- <li><span style="color: #3264f6">&#9632;</span> Government</li>
- </ul>
- </div>
- -->
+
<p>
- To help understand how COFW Dataset has been used around the world for commercial, military and academic research; publicly available research citing Caltech Occluded Faces in the Wild is collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal reserach projects at that location.
+    To help understand how COFW Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Caltech Occluded Faces in the Wild was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the location markers to reveal research projects at that location.
</p>
</section>
<section class="applet_container fullwidth">
<div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
-
</section>
<div class="caption">
@@ -82,23 +123,16 @@ To increase the number of training images, and since COFW has the exact same la
<li class="edu">Academic</li>
<li class="com">Commercial</li>
<li class="gov">Military / Government</li>
- <li class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</li>
</ul>
-</div>
-
-<!-- <section>
- <p class='subp'>
- [section under development] COFW Dataset ... Standardized paragraph of text about the map. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo.
- </p>
-</section>
- --><section>
+  <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a>, and then dataset usage is verified and geolocated.</div>
+</div><section>
<div class="hr-wave-holder">
<div class="hr-wave-line hr-wave-line1"></div>
<div class="hr-wave-line hr-wave-line2"></div>
</div>
- <h3>Supplementary Information</h3>
+ <h2>Supplementary Information</h2>
</section><section class="applet_container">
@@ -127,18 +161,17 @@ To increase the number of training images, and since COFW has the exact same la
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/duke_mtmc/index.html b/site/public/datasets/duke_mtmc/index.html
index 37de48ad..3c0bc0c2 100644
--- a/site/public/datasets/duke_mtmc/index.html
+++ b/site/public/datasets/duke_mtmc/index.html
@@ -17,7 +17,7 @@
<a class='slogan' href="/">
<div class='logo'></div>
<div class='site_name'>MegaPixels</div>
- <div class='splash'>Duke MTMC</div>
+ <div class='splash'>Duke MTMC Dataset</div>
</a>
<div class='links'>
<a href="/datasets/">Datasets</a>
@@ -26,8 +26,9 @@
</header>
<div class="content content-dataset">
- <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/duke_mtmc/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'><span class="dataset-name">Duke MTMC</span> is a dataset of surveillance camera footage of students on Duke University campus</span></div><div class='hero_subdesc'><span class='bgpad'>Duke MTMC contains over 2 million video frames and 2,000 unique identities collected from 8 HD cameras at Duke University campus in March 2014
-</span></div></div></section><section><div class='left-sidebar'><div class='meta'>
+ <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/duke_mtmc/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'><span class="dataset-name">Duke MTMC</span> is a dataset of surveillance camera footage of students on Duke University campus</span></div><div class='hero_subdesc'><span class='bgpad'>Duke MTMC contains over 2 million video frames and 2,700 unique identities collected from 8 HD cameras at Duke University campus in March 2014
+</span></div></div></section><section><h2>Duke MTMC</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
<div class='gray'>Published</div>
<div>2016</div>
</div><div class='meta'>
@@ -35,45 +36,210 @@
<div>2,000,000 </div>
</div><div class='meta'>
<div class='gray'>Identities</div>
- <div>1,812 </div>
+ <div>2,700 </div>
</div><div class='meta'>
<div class='gray'>Purpose</div>
- <div>Person re-identification and multi-camera tracking</div>
+ <div>Person re-identification, multi-camera tracking</div>
</div><div class='meta'>
<div class='gray'>Created by</div>
<div>Computer Science Department, Duke University, Durham, US</div>
</div><div class='meta'>
<div class='gray'>Website</div>
<div><a href='http://vision.cs.duke.edu/DukeMTMC/' target='_blank' rel='nofollow noopener'>duke.edu</a></div>
- </div><div class='meta'><div><div class='gray'>Created</div><div>2014</div></div><div><div class='gray'>Identities</div><div>Over 2,700</div></div><div><div class='gray'>Used for</div><div>Face recognition, person re-identification</div></div><div><div class='gray'>Created by</div><div>Computer Science Department, Duke University, Durham, US</div></div><div><div class='gray'>Website</div><div><a href="http://vision.cs.duke.edu/DukeMTMC/">duke.edu</a></div></div></div></div><h2>Duke Multi-Target, Multi-Camera Tracking Dataset (Duke MTMC)</h2>
-<p>[ PAGE UNDER DEVELOPMENT ]</p>
-<p>Duke MTMC is a dataset of video recorded on Duke University campus during for the purpose of training, evaluating, and improving <em>multi-target multi-camera tracking</em>. The videos were recorded during February and March 2014 and cinclude</p>
-<p>Includes a total of 888.8 minutes of video (ind. verified)</p>
-<p>"We make available a new data set that has more than 2 million frames and more than 2,700 identities. It consists of 8×85 minutes of 1080p video recorded at 60 frames per second from 8 static cameras deployed on the Duke University campus during periods between lectures, when pedestrian traffic is heavy."</p>
-<p>The dataset includes approximately 2,000 annotated identities appearing in 85 hours of video from 8 cameras located throughout Duke University's campus.</p>
-</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/duke_mtmc/assets/duke_mtmc_cam5_average_comp.jpg' alt=' Duke MTMC pixel-averaged image of camera #5 is shown with the bounding boxes for each student drawn in white. (c) Adam Harvey'><div class='caption'> Duke MTMC pixel-averaged image of camera #5 is shown with the bounding boxes for each student drawn in white. (c) Adam Harvey</div></div></section><section><p>According to the dataset authors,</p>
-</section><section>
+  </div></div><p>Duke MTMC (Multi-Target, Multi-Camera) is a dataset of surveillance video footage taken on Duke University's campus in 2014 and is used for research and development of video tracking systems, person re-identification, and low-resolution facial recognition. The dataset contains over 14 hours of synchronized surveillance video from 8 cameras at 1080p and 60 FPS, with over 2 million frames of 2,000 students walking to and from classes. The 8 surveillance cameras deployed on campus were specifically set up to capture students "during periods between lectures, when pedestrian traffic is heavy"<a class="footnote_shim" name="[^duke_mtmc_orig]_1"> </a><a href="#[^duke_mtmc_orig]" class="footnote" title="Footnote 1">1</a>.</p>
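+<p>As a quick arithmetic check on these figures (a sketch only; the 888.8-minute total is the independently verified footage length used elsewhere on this page):</p>
+<pre>
+# Sketch: relate the stated capture parameters to the stated frame count.
+minutes_total = 888.8              # total footage across all 8 cameras
+fps = 60                           # stated recording rate
+hours = minutes_total / 60         # ~14.8 hours -> "over 14 hours"
+frames = minutes_total * 60 * fps  # ~3.2M frames -> "over 2 million frames"
+print(f"{hours:.1f} h, {frames:,.0f} frames")
+</pre>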
+<p>In this investigation into the Duke MTMC dataset, we tracked down over 100 publicly available research papers that explicitly acknowledged using Duke MTMC. Our analysis shows that the dataset has spread far beyond its origins and intentions in academic research projects at Duke University. Since its publication in 2016, more than twice as many research citations originated in China as in the United States. Among these citations were papers with explicit and direct links to the Chinese military and several of the companies known to provide Chinese authorities with the oppressive surveillance technology used to monitor millions of Uighur Muslims.</p>
+<p>In one 2018 <a href="http://openaccess.thecvf.com/content_cvpr_2018/papers/Xu_Attention-Aware_Compositional_Network_CVPR_2018_paper.pdf">paper</a> jointly published by researchers from SenseNets and SenseTime (and funded by SenseTime Group Limited) entitled <a href="https://www.semanticscholar.org/paper/Attention-Aware-Compositional-Network-for-Person-Xu-Zhao/14ce502bc19b225466126b256511f9c05cadcb6e">Attention-Aware Compositional Network for Person Re-identification</a>, the Duke MTMC dataset was used for "extensive experiments" on improving person re-identification across multiple surveillance cameras with important applications in "finding missing elderly and children, and suspect tracking, etc." Both SenseNets and SenseTime have been directly linked to providing surveillance technology used to monitor Uighur Muslims in China. <a class="footnote_shim" name="[^sensetime_qz]_1"> </a><a href="#[^sensetime_qz]" class="footnote" title="Footnote 2">2</a><a class="footnote_shim" name="[^sensenets_uyghurs]_1"> </a><a href="#[^sensenets_uyghurs]" class="footnote" title="Footnote 3">3</a><a class="footnote_shim" name="[^xinjiang_nyt]_1"> </a><a href="#[^xinjiang_nyt]" class="footnote" title="Footnote 4">4</a></p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/duke_mtmc/assets/duke_mtmc_reid_montage.jpg' alt=' A collection of 1,600 out of the approximately 2,000 students and pedestrians in the Duke MTMC dataset. These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification, and eventually the QMUL SurvFace face recognition dataset. Open Data Commons Attribution License.'><div class='caption'> A collection of 1,600 out of the approximately 2,000 students and pedestrians in the Duke MTMC dataset. These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification, and eventually the QMUL SurvFace face recognition dataset. Open Data Commons Attribution License.</div></div></section><section><p>Despite <a href="https://www.hrw.org/news/2017/11/19/china-police-big-data-systems-violate-privacy-target-dissent">repeated</a> <a href="https://www.hrw.org/news/2018/02/26/china-big-data-fuels-crackdown-minority-region">warnings</a> by Human Rights Watch that the authoritarian surveillance used in China represents a violation of human rights, researchers at Duke University continued to provide open access to their dataset for anyone to use for any project. As the surveillance crisis in China grew, so did the number of citations with links to organizations complicit in the crisis. In 2018 alone there were over 70 research projects happening in China that publicly acknowledged benefiting from the Duke MTMC dataset. Amongst these were projects from SenseNets, SenseTime, CloudWalk, Megvii, Beihang University, and the PLA's National University of Defense Technology.</p>
+<table>
+<thead><tr>
+<th>Organization</th>
+<th>Paper</th>
+<th>Link</th>
+<th>Year</th>
+<th>Used Duke MTMC</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>Beihang University</td>
+<td>Orientation-Guided Similarity Learning for Person Re-identification</td>
+<td><a href="https://ieeexplore.ieee.org/document/8545620">ieee.org</a></td>
+<td>2018</td>
+<td>&#x2714;</td>
+</tr>
+<tr>
+<td>Beihang University</td>
+<td>Online Inter-Camera Trajectory Association Exploiting Person Re-Identification and Camera Topology</td>
+<td><a href="https://dl.acm.org/citation.cfm?id=3240663">acm.org</a></td>
+<td>2018</td>
+<td>&#x2714;</td>
+</tr>
+<tr>
+<td>CloudWalk</td>
+<td>CloudWalk re-identification technology extends facial biometric tracking with improved accuracy</td>
+<td><a href="https://www.biometricupdate.com/201903/cloudwalk-re-identification-technology-extends-facial-biometric-tracking-with-improved-accuracy">BiometricUpdate.com</a></td>
+<td>2019</td>
+<td>&#x2714;</td>
+</tr>
+<tr>
+<td>CloudWalk</td>
+<td>Horizontal Pyramid Matching for Person Re-identification</td>
+<td><a href="https://arxiv.org/pdf/1804.05275.pdf">arxiv.org</a></td>
+<td>2018</td>
+<td>&#x2714;</td>
+</tr>
+<tr>
+<td>Megvii</td>
+<td>Person Re-Identification (slides)</td>
+<td><a href="https://zsc.github.io/megvii-pku-dl-course/slides/Lecture%2011,%20Human%20Understanding_%20ReID%20and%20Pose%20and%20Attributes%20and%20Activity%20.pdf">github.io</a></td>
+<td>2017</td>
+<td>&#x2714;</td>
+</tr>
+<tr>
+<td>Megvii</td>
+<td>Multi-Target, Multi-Camera Tracking by Hierarchical Clustering: Recent Progress on DukeMTMC Project</td>
+<td><a href="https://www.semanticscholar.org/paper/Multi-Target%2C-Multi-Camera-Tracking-by-Hierarchical-Zhang-Wu/10c20cf47d61063032dce4af73a4b8e350bf1128">SemanticScholar</a></td>
+<td>2018</td>
+<td>&#x2714;</td>
+</tr>
+<tr>
+<td>Megvii</td>
+<td>SCPNet: Spatial-Channel Parallelism Network for Joint Holistic and Partial Person Re-Identification</td>
+<td><a href="https://arxiv.org/abs/1810.06996">arxiv.org</a></td>
+<td>2018</td>
+<td>&#x2714;</td>
+</tr>
+<tr>
+<td>National University of Defense Technology</td>
+<td>Tracking by Animation: Unsupervised Learning of Multi-Object Attentive Trackers</td>
+<td><a href="https://www.semanticscholar.org/paper/Tracking-by-Animation%3A-Unsupervised-Learning-of-He-Liu/e90816e1a0e14ea1e7039e0b2782260999aef786">SemanticScholar.org</a></td>
+<td>2018</td>
+<td>&#x2714;</td>
+</tr>
+<tr>
+<td>National University of Defense Technology</td>
+<td>Unsupervised Multi-Object Detection for Video Surveillance Using Memory-Based Recurrent Attention Networks</td>
+<td><a href="https://www.semanticscholar.org/paper/Unsupervised-Multi-Object-Detection-for-Video-Using-He-He/59f357015054bab43fb8cbfd3f3dbf17b1d1f881">SemanticScholar.org</a></td>
+<td>2018</td>
+<td>&#x2714;</td>
+</tr>
+<tr>
+<td>SenseNets, SenseTime</td>
+<td>Attention-Aware Compositional Network for Person Re-identification</td>
+<td><a href="https://www.semanticscholar.org/paper/Attention-Aware-Compositional-Network-for-Person-Xu-Zhao/14ce502bc19b225466126b256511f9c05cadcb6e">SemanticScholar</a></td>
+<td>2018</td>
+<td>&#x2714;</td>
+</tr>
+<tr>
+<td>SenseTime</td>
+<td>End-to-End Deep Kronecker-Product Matching for Person Re-identification</td>
+<td><a href="http://openaccess.thecvf.com/content_cvpr_2018/papers/Shen_End-to-End_Deep_Kronecker-Product_CVPR_2018_paper.pdf">thecvf.com</a></td>
+<td>2018</td>
+<td>&#x2714;</td>
+</tr>
+</tbody>
+</table>
+<p>The reasons that companies in China use the Duke MTMC dataset for research are technically no different from the reasons it is used in the United States and Europe. In fact, the original creators of the dataset published a follow-up report in 2017, titled <a href="https://www.semanticscholar.org/paper/Tracking-Social-Groups-Within-and-Across-Cameras-Solera-Calderara/9e644b1e33dd9367be167eb9d832174004840400">Tracking Social Groups Within and Across Cameras</a>, with specific applications to "automated analysis of crowds and social gatherings for surveillance and security applications". Their follow-up work and the creation of the original dataset in 2014 were both supported in part by the United States Army Research Laboratory.</p>
+<p>Citations from the United States and Europe show a similar trend to that in China, including publicly acknowledged and verified usage of the Duke MTMC dataset supported or carried out by the United States Department of Homeland Security, IARPA, IBM, Microsoft (which provides surveillance to ICE), and Vision Semantics (which works with the UK Ministry of Defence). One <a href="https://pdfs.semanticscholar.org/59f3/57015054bab43fb8cbfd3f3dbf17b1d1f881.pdf">paper</a> is even jointly published by researchers affiliated with both University College London and the National University of Defense Technology in China.</p>
+<table>
+<thead><tr>
+<th>Organization</th>
+<th>Paper</th>
+<th>Link</th>
+<th>Year</th>
+<th>Used Duke MTMC</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>IARPA, IBM</td>
+<td>Horizontal Pyramid Matching for Person Re-identification</td>
+<td><a href="https://arxiv.org/abs/1804.05275">arxiv.org</a></td>
+<td>2018</td>
+<td>&#x2714;</td>
+</tr>
+<tr>
+<td>Microsoft</td>
+<td>ReXCam: Resource-Efficient, Cross-CameraVideo Analytics at Enterprise Scale</td>
+<td><a href="https://arxiv.org/abs/1811.01268">arxiv.org</a></td>
+<td>2018</td>
+<td>&#x2714;</td>
+</tr>
+<tr>
+<td>Microsoft</td>
+<td>Scaling Video Analytics Systems to Large Camera Deployments</td>
+<td><a href="https://arxiv.org/pdf/1809.02318.pdf">arxiv.org</a></td>
+<td>2018</td>
+<td>&#x2714;</td>
+</tr>
+<tr>
+<td>University College London</td>
+<td>Unsupervised Multi-Object Detection for Video Surveillance Using Memory-Based Recurrent Attention Networks</td>
+<td><a href="https://pdfs.semanticscholar.org/59f3/57015054bab43fb8cbfd3f3dbf17b1d1f881.pdf">SemanticScholar.org</a></td>
+<td>2018</td>
+<td>&#x2714;</td>
+</tr>
+<tr>
+<td>US Dept. of Homeland Security</td>
+<td>Re-Identification with Consistent Attentive Siamese Networks</td>
+<td><a href="https://arxiv.org/abs/1811.07487/">arxiv.org</a></td>
+<td>2019</td>
+<td>&#x2714;</td>
+</tr>
+<tr>
+<td>Vision Semantics Ltd.</td>
+<td>Unsupervised Person Re-identification by Deep Learning Tracklet Association</td>
+<td><a href="https://arxiv.org/abs/1809.02874">arxiv.org</a></td>
+<td>2018</td>
+<td>&#x2714;</td>
+</tr>
+</tbody>
+</table>
+<p>By some metrics the dataset is considered a huge success. It is regarded as highly influential research and has contributed to hundreds, if not thousands, of projects to advance artificial intelligence for person tracking and monitoring. All of the above citations, regardless of country of origin, align perfectly with the original <a href="http://vision.cs.duke.edu/DukeMTMC/">intent</a> of the Duke MTMC dataset: "to accelerate advances in multi-target multi-camera tracking".</p>
+<p>The same logic applies to all the new extensions of the Duke MTMC dataset, including <a href="https://github.com/layumi/DukeMTMC-reID_evaluation">Duke MTMC Re-ID</a>, <a href="https://github.com/Yu-Wu/DukeMTMC-VideoReID">Duke MTMC Video Re-ID</a>, Duke MTMC Groups, and <a href="https://github.com/vana77/DukeMTMC-attribute">Duke MTMC Attribute</a>. And it also applies to all the new specialized datasets that will be created from Duke MTMC, such as the low-resolution face recognition dataset called <a href="https://qmul-survface.github.io/">QMUL-SurvFace</a>, which was funded in part by <a href="https://seequestor.com">SeeQuestor</a>, a computer vision provider to law enforcement agencies including Scotland Yard and Queensland Police. From the perspective of academic researchers, security contractors, and defense agencies using these datasets to advance their organization's work, Duke MTMC provides significant value regardless of who else is using it, so long as it advances their own interests in artificial intelligence.</p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/duke_mtmc/assets/duke_mtmc_saliencies.jpg' alt=' Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus &copy; megapixels.cc'><div class='caption'> Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus &copy; megapixels.cc</div></div></section><section><p>But this perspective comes at significant cost to civil rights, human rights, and privacy. The creation and distribution of the Duke MTMC dataset illustrate an egregious prioritization of surveillance technologies over individual rights, where the simple act of going to class could implicate your biometric data in a surveillance training dataset, perhaps even used by foreign defense agencies against your own ethics, against your own political interests, or against universal human rights.</p>
+<p>For the approximately 2,000 students in the Duke MTMC dataset, there is unfortunately no escape. It would be impossible to remove oneself from all copies of the dataset downloaded around the world. Instead, over 2,000 students and visitors who happened to be walking to class on March 13, 2014, will forever remain in all downloaded copies of the Duke MTMC dataset and all its extensions, contributing to a global supply chain of data that powers governmental and commercial expansion of biometric surveillance technologies.</p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/duke_mtmc/assets/duke_mtmc_cameras.jpg' alt=' Duke MTMC camera views for 8 cameras deployed on campus &copy; megapixels.cc'><div class='caption'> Duke MTMC camera views for 8 cameras deployed on campus &copy; megapixels.cc</div></div></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/duke_mtmc/assets/duke_mtmc_camera_map.jpg' alt=' Duke MTMC camera locations on Duke University campus. Open Data Commons Attribution License.'><div class='caption'> Duke MTMC camera locations on Duke University campus. Open Data Commons Attribution License.</div></div></section><section>
+ <h3>Who used Duke MTMC Dataset?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
<h3>Biometric Trade Routes</h3>
-<!--
- <div class="map-sidebar right-sidebar">
- <h3>Legend</h3>
- <ul>
- <li><span style="color: #f2f293">&#9632;</span> Industry</li>
- <li><span style="color: #f30000">&#9632;</span> Academic</li>
- <li><span style="color: #3264f6">&#9632;</span> Government</li>
- </ul>
- </div>
- -->
+
<p>
- To help understand how Duke MTMC Dataset has been used around the world for commercial, military and academic research; publicly available research citing Duke Multi-Target, Multi-Camera Tracking Project is collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal reserach projects at that location.
+    To help understand how the Duke MTMC dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Duke Multi-Target, Multi-Camera Tracking Project was collected, verified, and geocoded to show the biometric trade routes of the people appearing in the images. Click on the markers to reveal research projects at that location.
</p>
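+<p>
+  For illustration, the geocoding step can be approximated in a few lines of Python. The sketch below resolves an institution name to map coordinates using the public OpenStreetMap Nominatim API; the endpoint and parameters are assumptions for illustration and not necessarily the tooling used to build this map.
+</p>
+<pre>
+# Minimal sketch (assumes the public OpenStreetMap Nominatim search API):
+# resolve an institution name, as found in a paper's front matter, to
+# latitude/longitude coordinates for plotting on the map.
+import requests
+
+def geocode_institution(name):
+    resp = requests.get(
+        "https://nominatim.openstreetmap.org/search",
+        params={"q": name, "format": "json", "limit": 1},
+        headers={"User-Agent": "citation-geocoder-sketch"},
+    )
+    resp.raise_for_status()
+    results = resp.json()
+    if not results:
+        return None  # institution name could not be geocoded
+    return float(results[0]["lat"]), float(results[0]["lon"])
+
+# Example: geocode_institution("Duke University") returns coordinates
+# in Durham, North Carolina.
+</pre>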
</section>
<section class="applet_container fullwidth">
<div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
-
</section>
<div class="caption">
@@ -81,30 +247,19 @@
<li class="edu">Academic</li>
<li class="com">Commercial</li>
<li class="gov">Military / Government</li>
- <li class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</li>
</ul>
+  <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a>, then dataset usage is verified and geolocated.</div>
</div>
-<!-- <section>
- <p class='subp'>
- [section under development] Duke MTMC Dataset ... Standardized paragraph of text about the map. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo.
- </p>
-</section>
- --><section>
- <h3>Who used Duke MTMC Dataset?</h3>
+<section class="applet_container">
+
+ <h3>Dataset Citations</h3>
<p>
- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
</p>
-
- </section>
-<section class="applet_container">
-<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span>
-</div> -->
- <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
-</section><section class="applet_container">
- <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
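+<p>
+  As a rough illustration of the collection step, citing papers can be retrieved programmatically from Semantic Scholar. The sketch below pages through papers citing the original Duke MTMC publication; the endpoint and field names are assumptions based on the public Semantic Scholar Graph API, not the exact pipeline used for this site, and results still require manual verification of actual dataset usage.
+</p>
+<pre>
+# Minimal sketch (assumes the public Semantic Scholar Graph API): page
+# through the papers citing "Performance Measures and a Data Set for
+# Multi-Target, Multi-Camera Tracking" (Ristani et al., 2016).
+import requests
+
+PAPER_ID = "27a2fad58dd8727e280f97036e0d2bc55ef5424c"  # Duke MTMC paper
+URL = f"https://api.semanticscholar.org/graph/v1/paper/{PAPER_ID}/citations"
+
+def fetch_citing_papers(page_size=100):
+    papers, offset = [], 0
+    while True:
+        resp = requests.get(URL, params={
+            "fields": "title,year,externalIds",
+            "offset": offset,
+            "limit": page_size,
+        })
+        resp.raise_for_status()
+        page = resp.json()
+        papers += [row["citingPaper"] for row in page.get("data", [])]
+        if page.get("next") is None:  # no further pages
+            break
+        offset = page["next"]
+    return papers
+</pre>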
</section><section>
<div class="hr-wave-holder">
@@ -112,93 +267,155 @@
<div class="hr-wave-line hr-wave-line2"></div>
</div>
- <h3>Supplementary Information</h3>
+ <h2>Supplementary Information</h2>
-</section><section class="applet_container">
+</section><section><h4>Video Timestamps</h4>
+<p>The video timestamps contain the likely, but not yet confirmed, date and times of capture. Because the video timestamps align with the start and stop <a href="http://vision.cs.duke.edu/DukeMTMC/details.html#time-sync">time sync data</a> provided by the researchers, they at least confirm the relative timing. The <a href="https://www.wunderground.com/history/daily/KIGX/date/2014-3-19?req_city=Durham&amp;req_state=NC&amp;req_statename=North%20Carolina&amp;reqdb.zip=27708&amp;reqdb.magic=1&amp;reqdb.wmo=99999">rainy weather</a> recorded on March 14, 2014 in Durham, North Carolina supports, but does not confirm, that this day is the capture date.</p>
+</section><section><div class='columns columns-2'><div class='column'><table>
+<thead><tr>
+<th>Camera</th>
+<th>Date</th>
+<th>Start</th>
+<th>End</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>Camera 1</td>
+<td>March 14, 2014</td>
+<td>4:14PM</td>
+<td>5:43PM</td>
+</tr>
+<tr>
+<td>Camera 2</td>
+<td>March 14, 2014</td>
+<td>4:13PM</td>
+<td>4:43PM</td>
+</tr>
+<tr>
+<td>Camera 3</td>
+<td>March 14, 2014</td>
+<td>4:20PM</td>
+<td>5:48PM</td>
+</tr>
+<tr>
+<td>Camera 4</td>
+<td>March 14, 2014</td>
+<td>4:21PM</td>
+<td>5:54PM</td>
+</tr>
+</tbody>
+</table>
+</div><div class='column'><table>
+<thead><tr>
+<th>Camera</th>
+<th>Date</th>
+<th>Start</th>
+<th>End</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>Camera 5</td>
+<td>March 14, 2014</td>
+<td>4:12PM</td>
+<td>5:43PM</td>
+</tr>
+<tr>
+<td>Camera 6</td>
+<td>March 14, 2014</td>
+<td>4:18PM</td>
+<td>5:43PM</td>
+</tr>
+<tr>
+<td>Camera 7</td>
+<td>March 14, 2014</td>
+<td>4:16PM</td>
+<td>5:40PM</td>
+</tr>
+<tr>
+<td>Camera 8</td>
+<td>March 14, 2014</td>
+<td>4:25PM</td>
+<td>5:42PM</td>
+</tr>
+</tbody>
+</table>
+</div></div></section><section><h4>Notes</h4>
+<p>The original Duke MTMC dataset paper mentions 2,700 identities, but the ground truth file only lists annotations for 1,812, and the researchers' own materials typically mention 2,000. For this write-up, 2,000 is used as the approximate number of students.</p>
+<h4>Ethics</h4>
+<p>Please direct any questions about the ethics of the dataset to Duke University's <a href="https://hr.duke.edu/policies/expectations/compliance/">Institutional Ethics &amp; Compliance Office</a> using the number at the bottom of the page.</p>
+</section><section>
- <h3>Dataset Citations</h3>
+ <h4>Cite Our Work</h4>
<p>
- The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
- </p>
+
+    If you use our data, research, or graphics, please cite our work:
- <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
-</section><section><h2>Research Notes</h2>
+<pre id="cite-bibtex">
+@online{megapixels,
+  author = {Harvey, Adam and LaPlace, Jules},
+ title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
+ year = 2019,
+ url = {https://megapixels.cc/},
+ urldate = {2019-04-18}
+}</pre>
+
+ </p>
<ul>
-<li>"We make available a new data set that has more than 2 million frames and more than 2,700 identities. It consists of 8×85 minutes of 1080p video recorded at 60 frames per second from 8 static cameras deployed on the Duke University campus during periods between lectures, when pedestrian traffic is heavy." - 27a2fad58dd8727e280f97036e0d2bc55ef5424c</li>
-<li>"This work was supported in part by the EPSRC Programme Grant (FACER2VM) EP/N007743/1, EPSRC/dstl/MURI project EP/R018456/1, the National Natural Science Foundation of China (61373055, 61672265, 61602390, 61532009, 61571313), Chinese Ministry of Education (Z2015101), Science and Technology Department of Sichuan Province (2017RZ0009 and 2017FZ0029), Education Department of Sichuan Province (15ZB0130), the Open Research Fund from Province Key Laboratory of Xihua University (szjj2015-056) and the NVIDIA GPU Grant Program." - ec9c20ed6cce15e9b63ac96bb5a6d55e69661e0b</li>
-<li>"DukeMTMC aims to accelerate advances in multi-target multi-camera tracking. It provides a tracking system that works within and across cameras, a new large scale HD video data set recorded by 8 synchronized cameras with more than 7,000 single camera trajectories and over 2,000 unique identities, and a new performance evaluation method that measures how often a system is correct about who is where"</li>
-<li><p>DukeMTMC is a new, manually annotated, calibrated, multi-camera data set recorded outdoors on the Duke University campus with 8 synchronized cameras. It consists of:</p>
-<p>8 static cameras x 85 minutes of 1080p 60 fps video
- More than 2,000,000 manually annotated frames
- More than 2,000 identities
- Manual annotation by 5 people over 1 year
- More identities than all existing MTMC datasets combined
- Unconstrained paths, diverse appearance</p>
-</li>
-<li>DukeMTMC Project
-Ergys Ristani Ergys Ristani Ergys Ristani Ergys Ristani Ergys Ristani</li>
</ul>
-<p>People involved:
-Ergys Ristani, Francesco Solera, Roger S. Zou, Rita Cucchiara, Carlo Tomasi.</p>
-<p>Navigation:</p>
-<p>Data Set
- Downloads
- Downloads
- Dataset Extensions
- Performance Measures
- Tracking Systems
- Publications
- How to Cite
- Contact</p>
-<p>Welcome to the Duke Multi-Target, Multi-Camera Tracking Project.</p>
-<p>DukeMTMC aims to accelerate advances in multi-target multi-camera tracking. It provides a tracking system that works within and across cameras, a new large scale HD video data set recorded by 8 synchronized cameras with more than 7,000 single camera trajectories and over 2,000 unique identities, and a new performance evaluation method that measures how often a system is correct about who is where.
-DukeMTMC Data Set
-Snapshot from the DukeMTMC data set.</p>
-<p>DukeMTMC is a new, manually annotated, calibrated, multi-camera data set recorded outdoors on the Duke University campus with 8 synchronized cameras. It consists of:</p>
-<p>8 static cameras x 85 minutes of 1080p 60 fps video
- More than 2,000,000 manually annotated frames
- More than 2,000 identities
- Manual annotation by 5 people over 1 year
- More identities than all existing MTMC datasets combined
- Unconstrained paths, diverse appearance</p>
-<p>News</p>
-<p>05 Feb 2019 We are organizing the 2nd Workshop on MTMCT and ReID at CVPR 2019
- 25 Jul 2018: The code for DeepCC is available on github
- 28 Feb 2018: OpenPose detections now available for download
- 19 Feb 2018: Our DeepCC tracker has been accepted to CVPR 2018
- 04 Oct 2017: A new blog post describes ID measures of performance
- 26 Jul 2017: Slides from the BMTT 2017 workshop are now available
- 09 Dec 2016: DukeMTMC is now hosted on MOTChallenge</p>
-<p>DukeMTMC Downloads</p>
-<p>DukeMTMC dataset (tracking)</p>
-<p>Dataset Extensions</p>
-<p>Below is a list of dataset extensions provided by the community:</p>
-<p>DukeMTMC-VideoReID (download)
- DukeMTMC-reID (download)
- DukeMTMC4REID
- DukeMTMC-attribute</p>
-<p>If you use or extend DukeMTMC, please refer to the license terms.
-DukeMTMCT Benchmark</p>
-<p>DukeMTMCT is a tracking benchmark hosted on motchallenge.net. Click here for the up-to-date rankings. Here you will find the official motchallenge-devkit used for evaluation by MOTChallenge. For detailed instructions how to submit on motchallenge you can refer to this link.</p>
-<p>Trackers are ranked using our identity-based measures which compute how often the system is correct about who is where, regardless of how often a target is lost and reacquired. Our measures are useful in applications such as security, surveillance or sports. This short post describes our measures with illustrations, while for details you can refer to the original paper.
-Tracking Systems</p>
-<p>We provide code for the following tracking systems which are all based on Correlation Clustering optimization:</p>
-<p>DeepCC for single- and multi-camera tracking [1]
- Single-Camera Tracker (demo video) [2]
- Multi-Camera Tracker (demo video, failure cases) [2]
- People-Groups Tracker [3]
- Original Single-Camera Tracker [4]</p>
-</section>
+</section><section><p>If you use any data from the Duke MTMC, please follow their <a href="http://vision.cs.duke.edu/DukeMTMC/#how-to-cite">license</a> and cite their work as:</p>
+<pre>
+@inproceedings{ristani2016MTMC,
+ title = {Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking},
+ author = {Ristani, Ergys and Solera, Francesco and Zou, Roger and Cucchiara, Rita and Tomasi, Carlo},
+ booktitle = {European Conference on Computer Vision workshop on Benchmarking Multi-Target Tracking},
+ year = {2016}
+}
+</pre></section><section><h3>References</h3><section><ul class="footnotes"><li><a name="[^xinjiang_nyt]" class="footnote_shim"></a><span class="backlinks"><a href="#[^xinjiang_nyt]_1">a</a></span><p>Mozur, Paul. "One Month, 500,000 Face Scans: How China Is Using A.I. to Profile a Minority". <a href="https://www.nytimes.com/2019/04/14/technology/china-surveillance-artificial-intelligence-racial-profiling.html">https://www.nytimes.com/2019/04/14/technology/china-surveillance-artificial-intelligence-racial-profiling.html</a>. April 14, 2019.</p>
+</li><li><a name="[^sensetime_qz]" class="footnote_shim"></a><span class="backlinks"><a href="#[^sensetime_qz]_1">a</a></span><p><a href="https://qz.com/1248493/sensetime-the-billion-dollar-alibaba-backed-ai-company-thats-quietly-watching-everyone-in-china/">https://qz.com/1248493/sensetime-the-billion-dollar-alibaba-backed-ai-company-thats-quietly-watching-everyone-in-china/</a></p>
+</li><li><a name="[^sensenets_uyghurs]" class="footnote_shim"></a><span class="backlinks"><a href="#[^sensenets_uyghurs]_1">a</a></span><p><a href="https://foreignpolicy.com/2019/03/19/962492-orwell-china-socialcredit-surveillance/">https://foreignpolicy.com/2019/03/19/962492-orwell-china-socialcredit-surveillance/</a></p>
+</li><li><a name="[^duke_mtmc_orig]" class="footnote_shim"></a><span class="backlinks"><a href="#[^duke_mtmc_orig]_1">a</a></span><p>"Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking". 2016. <a href="https://www.semanticscholar.org/paper/Performance-Measures-and-a-Data-Set-for-Tracking-Ristani-Solera/27a2fad58dd8727e280f97036e0d2bc55ef5424c">SemanticScholar</a></p>
+</li></ul></section></section>
</div>
<footer>
<div>
<a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
+ <a href="/datasets/">Datasets</a>
<a href="/about/">About</a>
- <a href="/about/team/">Team</a>
+ <a href="/about/press/">Press</a>
+ <a href="/about/legal/">Legal and Privacy</a>
</div>
<div>
MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
diff --git a/site/public/datasets/facebook/index.html b/site/public/datasets/facebook/index.html
deleted file mode 100644
index b2943e1f..00000000
--- a/site/public/datasets/facebook/index.html
+++ /dev/null
@@ -1,55 +0,0 @@
-<!doctype html>
-<html>
-<head>
- <title>MegaPixels</title>
- <meta charset="utf-8" />
- <meta name="author" content="Adam Harvey" />
- <meta name="description" content="TBD" />
- <meta name="referrer" content="no-referrer" />
- <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
- <link rel='stylesheet' href='/assets/css/fonts.css' />
- <link rel='stylesheet' href='/assets/css/css.css' />
- <link rel='stylesheet' href='/assets/css/leaflet.css' />
- <link rel='stylesheet' href='/assets/css/applets.css' />
-</head>
-<body>
- <header>
- <a class='slogan' href="/">
- <div class='logo'></div>
- <div class='site_name'>MegaPixels</div>
-
- </a>
- <div class='links'>
- <a href="/datasets/">Datasets</a>
- <a href="/about/">About</a>
- </div>
- </header>
- <div class="content content-">
-
- <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/facebook/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>TBD</span></div><div class='hero_subdesc'><span class='bgpad'>TBD
-</span></div></div></section><section><div class='image'><div class='intro-caption caption'>TBD</div></div></section><section><h3>Statistics</h3>
-<div class='meta'><div><div class='gray'>Years</div><div>2002-2004</div></div><div><div class='gray'>Images</div><div>13,233</div></div><div><div class='gray'>Identities</div><div>5,749</div></div><div><div class='gray'>Origin</div><div>Yahoo News Images</div></div><div><div class='gray'>Funding</div><div>(Possibly, partially CIA)</div></div></div><p>Ignore content below these lines</p>
-<ul>
-<li>Tool to create face datasets from Facebook <a href="https://github.com/ankitaggarwal011/FaceGrab">https://github.com/ankitaggarwal011/FaceGrab</a></li>
-</ul>
-</section>
-
- </div>
- <footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
- </footer>
-</body>
-
-<script src="/assets/js/dist/index.js"></script>
-</html> \ No newline at end of file
diff --git a/site/public/datasets/feret/index.html b/site/public/datasets/feret/index.html
index 45510f64..7f9ed94c 100644
--- a/site/public/datasets/feret/index.html
+++ b/site/public/datasets/feret/index.html
@@ -26,13 +26,84 @@
</header>
<div class="content content-">
- <section><h1>FERET</h1>
-</section><section><div class='meta'><div><div class='gray'>Years</div><div>1993-1996</div></div><div><div class='gray'>Images</div><div>14,126</div></div><div><div class='gray'>Identities</div><div>1,199 </div></div><div><div class='gray'>Origin</div><div>Fairfax, MD</div></div></div><section><section><!--header--></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/feret/assets/feret_index.gif' alt=''></div></section><section><p><em>Facial Recognition Evaluation</em> (FERET) is develop, test, and evaluate face recognition algorithms</p>
-<p>The goal of the FERET program was to develop automatic face recognition capabilities that could be employed to assist security, intelligence, and law enforcement personnel in the performance of their duties.</p>
+    <section><h1>FacE REcognition Technology (FERET)</h1>
+</section><section><div class='right-sidebar'><div class='meta'>
+    <div class='gray'>Years</div>
+    <div>1993-1996</div>
+  </div><div class='meta'>
+    <div class='gray'>Images</div>
+    <div>14,126 </div>
+  </div><div class='meta'>
+    <div class='gray'>Identities</div>
+    <div>1,199 </div>
+  </div><div class='meta'>
+    <div class='gray'>Purpose</div>
+    <div>face recognition</div>
+  </div><div class='meta'>
+    <div class='gray'>Website</div>
+    <div><a href='https://www.nist.gov/programs-projects/face-recognition-technology-feret' target='_blank' rel='nofollow noopener'>nist.gov</a></div>
+  </div></div><p>[ page under development ]</p>
+</section><section>
+  <h3>Who used FERET?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
+
+ <h3>Biometric Trade Routes</h3>
+
+ <p>
+    To help understand how FERET has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the FERET database was collected, verified, and geocoded to show the biometric trade routes of the people appearing in the images. Click on the markers to reveal research projects at that location.
+ </p>
+
+ </section>
+
+<section class="applet_container fullwidth">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
+</section>
+
+<div class="caption">
+ <ul class="map-legend">
+ <li class="edu">Academic</li>
+ <li class="com">Commercial</li>
+ <li class="gov">Military / Government</li>
+ </ul>
+  <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a>, then dataset usage is verified and geolocated.</div>
+</div>
+
+
+<section class="applet_container">
+
+ <h3>Dataset Citations</h3>
+ <p>
+ The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
+ </p>
+
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
+</section><section><h3>(ignore) RESEARCH below this line</h3>
<ul>
+<li>Years: 1993-1996</li>
+<li>Images: 14,126</li>
+<li>Identities: 1,199 </li>
+<li>Origin: Fairfax, MD</li>
+<li><em>FacE REcognition Technology</em> (FERET) was a program to develop, test, and evaluate face recognition algorithms</li>
+<li>The goal of the FERET program was to develop automatic face recognition capabilities that could be employed to assist security, intelligence, and law enforcement personnel in the performance of their duties.</li>
<li><a href="https://www.nist.gov/programs-projects/face-recognition-technology-feret">https://www.nist.gov/programs-projects/face-recognition-technology-feret</a></li>
</ul>
-<p>RESEARCH below this line</p>
<h3>"The FERET database and evaluation procedure for face-recognition algorithms"</h3>
<ul>
<li>Images were captured using Kodak Ultra film</li>
@@ -48,18 +119,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/hrt_transgender/index.html b/site/public/datasets/hrt_transgender/index.html
index 80a4f40b..4e566a4a 100644
--- a/site/public/datasets/hrt_transgender/index.html
+++ b/site/public/datasets/hrt_transgender/index.html
@@ -27,7 +27,8 @@
<div class="content content-dataset">
<section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/hrt_transgender/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>TBD</span></div><div class='hero_subdesc'><span class='bgpad'>TBD
-</span></div></div></section><section><div class='left-sidebar'><div class='meta'>
+</span></div></div></section><section><h2>HRT Transgender Dataset</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
<div class='gray'>Published</div>
<div>2013</div>
</div><div class='meta'>
@@ -38,97 +39,27 @@
<div>38 </div>
</div><div class='meta'>
<div class='gray'>Purpose</div>
- <div>gender transition and facial recognition</div>
+ <div>Face recognition, gender transition biometrics</div>
</div><div class='meta'>
<div class='gray'>Website</div>
<div><a href='http://www.faceaginggroup.com/hrt-transgender/' target='_blank' rel='nofollow noopener'>faceaginggroup.com</a></div>
- </div><div class='meta'><div><div class='gray'>Published</div><div>TBD</div></div><div><div class='gray'>Images</div><div>TBD</div></div></div></div><h2>HRT Transgender Dataset</h2>
-</section><section>
- <h3>Who used HRT Transgender?</h3>
-
- <p>
- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
- </p>
-
- </section>
-
-<section class="applet_container">
-<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span>
-</div> -->
- <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
-</section><section class="applet_container">
- <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
-</section><section>
-
- <h3>Biometric Trade Routes</h3>
-<!--
- <div class="map-sidebar right-sidebar">
- <h3>Legend</h3>
- <ul>
- <li><span style="color: #f2f293">&#9632;</span> Industry</li>
- <li><span style="color: #f30000">&#9632;</span> Academic</li>
- <li><span style="color: #3264f6">&#9632;</span> Government</li>
- </ul>
- </div>
- -->
- <p>
- To help understand how HRT Transgender has been used around the world for commercial, military and academic research; publicly available research citing HRT Transgender Dataset is collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal reserach projects at that location.
- </p>
-
- </section>
-
-<section class="applet_container fullwidth">
- <div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
-
-</section>
-
-<div class="caption">
- <ul class="map-legend">
- <li class="edu">Academic</li>
- <li class="com">Commercial</li>
- <li class="gov">Military / Government</li>
- <li class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</li>
- </ul>
-</div>
-
-<!-- <section>
- <p class='subp'>
- [section under development] HRT Transgender ... Standardized paragraph of text about the map. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo.
- </p>
-</section>
- --><section>
-
- <div class="hr-wave-holder">
- <div class="hr-wave-line hr-wave-line1"></div>
- <div class="hr-wave-line hr-wave-line2"></div>
- </div>
-
- <h3>Supplementary Information</h3>
-
-</section><section class="applet_container">
-
- <h3>Dataset Citations</h3>
- <p>
- The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
- </p>
-
- <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
+ </div></div><p>[ page under development ]</p>
+</section><section><p>{% include 'dashboard.html' %}</p>
</section>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/index.html b/site/public/datasets/index.html
index f4776f6a..5c8e2546 100644
--- a/site/public/datasets/index.html
+++ b/site/public/datasets/index.html
@@ -28,7 +28,7 @@
<section><h1>Facial Recognition Datasets</h1>
-<p>Explore publicly available facial recognition datasets. More datasets will be added throughout 2019.</p>
+<p>Explore publicly available facial recognition datasets feeding into research and development of biometric surveillance technologies at the largest technology companies and defense contractors in the world.</p>
</section>
<section class='applet_container autosize'><div class='applet' data-payload='{"command":"dataset_list"}'></div></section>
@@ -51,43 +51,31 @@
<a href="/datasets/duke_mtmc/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/duke_mtmc/assets/index.jpg)">
<div class="dataset">
- <span class='title'>Duke Multi-Target, Multi-Camera Tracking</span>
+ <span class='title'>Duke MTMC</span>
<div class='fields'>
<div class='year visible'><span>2016</span></div>
- <div class='purpose'><span>Person re-identification and multi-camera tracking</span></div>
+ <div class='purpose'><span>Person re-identification, multi-camera tracking</span></div>
<div class='images'><span>2,000,000 images</span></div>
<div class='identities'><span>1,812 </span></div>
</div>
</div>
</a>
- <a href="/datasets/lfw/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/index.jpg)">
+ <a href="/datasets/hrt_transgender/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/hrt_transgender/assets/index.jpg)">
<div class="dataset">
- <span class='title'>Labeled Faces in The Wild</span>
+ <span class='title'>HRT Transgender Dataset</span>
<div class='fields'>
- <div class='year visible'><span>2007</span></div>
- <div class='purpose'><span>face recognition</span></div>
- <div class='images'><span>13,233 images</span></div>
- <div class='identities'><span>5,749 </span></div>
- </div>
- </div>
- </a>
-
- <a href="/datasets/market_1501/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/market_1501/assets/index.jpg)">
- <div class="dataset">
- <span class='title'>Market-1501</span>
- <div class='fields'>
- <div class='year visible'><span>2015</span></div>
- <div class='purpose'><span>Person re-identification</span></div>
- <div class='images'><span>32,668 images</span></div>
- <div class='identities'><span>1,501 </span></div>
+ <div class='year visible'><span>2013</span></div>
+          <div class='purpose'><span>Gender transition and facial recognition</span></div>
+ <div class='images'><span>10,564 images</span></div>
+ <div class='identities'><span>38 </span></div>
</div>
</div>
</a>
<a href="/datasets/msceleb/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/msceleb/assets/index.jpg)">
<div class="dataset">
- <span class='title'>MS Celeb</span>
+ <span class='title'>Microsoft Celeb</span>
<div class='fields'>
<div class='year visible'><span>2016</span></div>
<div class='purpose'><span>Large-scale face recognition</span></div>
@@ -97,23 +85,23 @@
</div>
</a>
- <a href="/datasets/pipa/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/pipa/assets/index.jpg)">
+ <a href="/datasets/oxford_town_centre/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/oxford_town_centre/assets/index.jpg)">
<div class="dataset">
- <span class='title'>People in Photo Albums</span>
+ <span class='title'>Oxford Town Centre</span>
<div class='fields'>
- <div class='year visible'><span>2015</span></div>
- <div class='purpose'><span>Face recognition</span></div>
- <div class='images'><span>37,107 images</span></div>
- <div class='identities'><span>2,356 </span></div>
+ <div class='year visible'><span>2011</span></div>
+ <div class='purpose'><span>Person detection, gaze estimation</span></div>
+ <div class='images'><span> images</span></div>
+ <div class='identities'><span></span></div>
</div>
</div>
</a>
<a href="/datasets/uccs/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/index.jpg)">
<div class="dataset">
- <span class='title'>Unconstrained College Students</span>
+ <span class='title'>UnConstrained College Students</span>
<div class='fields'>
- <div class='year visible'><span>2018</span></div>
+ <div class='year visible'><span>2016</span></div>
<div class='purpose'><span>Face recognition, face detection</span></div>
<div class='images'><span>16,149 images</span></div>
<div class='identities'><span>1,732 </span></div>
@@ -121,36 +109,23 @@
</div>
</a>
- <a href="/datasets/viper/" style="background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/viper/assets/index.jpg)">
- <div class="dataset">
- <span class='title'>VIPeR</span>
- <div class='fields'>
- <div class='year visible'><span>2007</span></div>
- <div class='purpose'><span>Person re-identification</span></div>
- <div class='images'><span>1,264 images</span></div>
- <div class='identities'><span>632 </span></div>
- </div>
- </div>
- </a>
-
</div>
</section>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/lfpw/index.html b/site/public/datasets/lfpw/index.html
index 77189ce7..a9eb025d 100644
--- a/site/public/datasets/lfpw/index.html
+++ b/site/public/datasets/lfpw/index.html
@@ -26,8 +26,68 @@
</header>
<div class="content content-">
- <section><h1>Labeled Face Parts in The Wild</h1>
-</section><section><div class='meta'><div><div class='gray'>Year</div><div>2011</div></div><div><div class='gray'>Images</div><div>1,432</div></div><div><div class='gray'>Origin</div><div>Flickr</div></div><div><div class='gray'>Funding</div><div>CIA</div></div></div><section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfpw/assets/background.jpg' alt=''></div></section><section><p>RESEARCH below this line</p>
+ <section><h2>Labeled Face Parts in The Wild</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
+ <div class='gray'>Published</div>
+ <div>2011</div>
+ </div><div class='meta'>
+ <div class='gray'>Funded by</div>
+ <div>CIA</div>
+ </div><div class='meta'>
+ <div class='gray'>Website</div>
+ <div><a href='http://neerajkumar.org/databases/lfpw/' target='_blank' rel='nofollow noopener'>neerajkumar.org</a></div>
+ </div></div></section><section>
+  <h3>Who used LFPW?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
+
+ <h3>Biometric Trade Routes</h3>
+
+ <p>
+    To help understand how LFPW has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Labeled Face Parts in the Wild was collected, verified, and geocoded to show the biometric trade routes of the people appearing in the images. Click on the markers to reveal research projects at that location.
+ </p>
+
+ </section>
+
+<section class="applet_container fullwidth">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
+</section>
+
+<div class="caption">
+ <ul class="map-legend">
+ <li class="edu">Academic</li>
+ <li class="com">Commercial</li>
+ <li class="gov">Military / Government</li>
+ </ul>
+  <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a>, then dataset usage is verified and geolocated.</div>
+</div>
+
+
+<section class="applet_container">
+
+ <h3>Dataset Citations</h3>
+ <p>
+ The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
+ </p>
+
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
+</section><section><p>RESEARCH below this line</p>
<blockquote><p>Release 1 of LFPW consists of 1,432 faces from images downloaded from the web using simple text queries on sites such as google.com, flickr.com, and yahoo.com. Each image was labeled by three MTurk workers, and 29 fiducial points, shown below, are included in dataset. LFPW was originally described in the following publication:</p>
<p>Due to copyright issues, we cannot distribute image files in any format to anyone. Instead, we have made available a list of image URLs where you can download the images yourself. We realize that this makes it impossible to exactly compare numbers, as image links will slowly disappear over time, but we have no other option. This seems to be the way other large web-based databases seem to be evolving.</p>
</blockquote>
@@ -38,18 +98,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html
index d451d0cd..ff7a3cd9 100644
--- a/site/public/datasets/lfw/index.html
+++ b/site/public/datasets/lfw/index.html
@@ -27,7 +27,8 @@
<div class="content content-">
<section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'><span class="dataset-name">Labeled Faces in The Wild (LFW)</span> is the first facial recognition dataset created entirely from online photos</span></div><div class='hero_subdesc'><span class='bgpad'>It includes 13,233 images of 5,749 people copied from the Internet during 2002-2004 and is the most frequently used dataset in the world for benchmarking face recognition algorithms.
-</span></div></div></section><section><div class='left-sidebar'><div class='meta'>
+</span></div></div></section><section><h2>Labeled Faces in the Wild</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
<div class='gray'>Published</div>
<div>2007</div>
</div><div class='meta'>
@@ -42,45 +43,44 @@
</div><div class='meta'>
<div class='gray'>Website</div>
<div><a href='http://vis-www.cs.umass.edu/lfw/' target='_blank' rel='nofollow noopener'>umass.edu</a></div>
- </div><div class='meta'><div><div class='gray'>Created</div><div>2002 &ndash; 2004</div></div><div><div class='gray'>Images</div><div>13,233</div></div><div><div class='gray'>Identities</div><div>5,749</div></div><div><div class='gray'>Origin</div><div>Yahoo! News Images</div></div><div><div class='gray'>Used by</div><div>Facebook, Google, Microsoft, Baidu, Tencent, SenseTime, Face++, CIA, NSA, IARPA</div></div><div><div class='gray'>Website</div><div><a href="http://vis-www.cs.umass.edu/lfw">umass.edu</a></div></div></div><ul>
-<li>There are about 3 men for every 1 woman in the LFW dataset<a class="footnote_shim" name="[^lfw_www]_1"> </a><a href="#[^lfw_www]" class="footnote" title="Footnote 1">1</a></li>
-<li>The person with the most images is <a href="http://vis-www.cs.umass.edu/lfw/person/George_W_Bush_comp.html">George W. Bush</a> with 530</li>
-<li>There are about 3 George W. Bush's for every 1 <a href="http://vis-www.cs.umass.edu/lfw/person/Tony_Blair.html">Tony Blair</a></li>
-<li>The LFW dataset includes over 500 actors, 30 models, 10 presidents, 124 basketball players, 24 football players, 11 kings, 7 queens, and 1 <a href="http://vis-www.cs.umass.edu/lfw/person/Moby.html">Moby</a></li>
-<li>In all 3 of the LFW publications [^lfw_original_paper], [^lfw_survey], [^lfw_tech_report] the words "ethics", "consent", and "privacy" appear 0 times</li>
-<li>The word "future" appears 71 times</li>
-<li>* denotes partial funding for related research</li>
-</ul>
-</div><h2>Labeled Faces in the Wild</h2>
-<p>(PAGE UNDER DEVELOPMENT)</p>
-<p><em>Labeled Faces in The Wild</em> (LFW) is "a database of face photographs designed for studying the problem of unconstrained face recognition<a class="footnote_shim" name="[^lfw_www]_2"> </a><a href="#[^lfw_www]" class="footnote" title="Footnote 1">1</a>. It is used to evaluate and improve the performance of facial recognition algorithms in academic, commercial, and government research. According to BiometricUpdate.com<a class="footnote_shim" name="[^lfw_pingan]_1"> </a><a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a>, LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."</p>
+ </div></div><p>[ PAGE UNDER DEVELOPMENT ]</p>
+<p><em>Labeled Faces in The Wild</em> (LFW) is "a database of face photographs designed for studying the problem of unconstrained face recognition"<a class="footnote_shim" name="[^lfw_www]_1"> </a><a href="#[^lfw_www]" class="footnote" title="Footnote 1">1</a>. It is used to evaluate and improve the performance of facial recognition algorithms in academic, commercial, and government research. According to BiometricUpdate.com<a class="footnote_shim" name="[^lfw_pingan]_1"> </a><a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a>, LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."</p>
<p>The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. LFW is a subset of <em>Names of Faces</em> and is part of the first facial recognition training dataset created entirely from images appearing on the Internet. The people appearing in LFW are...</p>
<p>The <em>Names and Faces</em> dataset was the first face recognition dataset created entirely from online photos. However, <em>Names and Faces</em> and <em>LFW</em> are not the first face recognition datasets created entirely "in the wild". That title belongs to the <a href="/datasets/ucd_faces/">UCD dataset</a>. Images obtained "in the wild" means images used without the explicit consent or awareness of the subject or photographer.</p>
</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_montage_all_crop.jpg' alt='All 5,749 people in the Labeled Faces in The Wild Dataset. Showing one face per person'><div class='caption'>All 5,749 people in the Labeled Faces in The Wild Dataset. Showing one face per person</div></div>
</section><section>
+ <h3>Who used LFW?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
<h3>Biometric Trade Routes</h3>
-<!--
- <div class="map-sidebar right-sidebar">
- <h3>Legend</h3>
- <ul>
- <li><span style="color: #f2f293">&#9632;</span> Industry</li>
- <li><span style="color: #f30000">&#9632;</span> Academic</li>
- <li><span style="color: #3264f6">&#9632;</span> Government</li>
- </ul>
- </div>
- -->
+
<p>
- To help understand how LFW has been used around the world for commercial, military and academic research; publicly available research citing Labeled Faces in the Wild is collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal reserach projects at that location.
+    To help understand how LFW has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Labeled Faces in the Wild was collected, verified, and geocoded to show the biometric trade routes of the people appearing in the images. Click on the markers to reveal research projects at that location.
</p>
</section>
<section class="applet_container fullwidth">
<div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
-
</section>
<div class="caption">
@@ -88,30 +88,19 @@
<li class="edu">Academic</li>
<li class="com">Commercial</li>
<li class="gov">Military / Government</li>
- <li class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</li>
</ul>
+ <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</div >
</div>
-<!-- <section>
- <p class='subp'>
- [section under development] LFW ... Standardized paragraph of text about the map. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo.
- </p>
-</section>
- --><section>
- <h3>Who used LFW?</h3>
+<section class="applet_container">
+
+ <h3>Dataset Citations</h3>
<p>
- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
</p>
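+
+  <p>
+  As a minimal sketch of the collection step, assuming the public Semantic Scholar Graph API (the endpoint and field names follow its documentation; the manual verification and geocoding steps described above are omitted):
+  </p>
+<pre>
+// Hypothetical fetch of papers citing a dataset's main publication
+// from the Semantic Scholar Graph API. paperId is the paper's S2 hash.
+async function fetchCitingPapers(paperId: string) {
+  const url = "https://api.semanticscholar.org/graph/v1/paper/" +
+    paperId + "/citations?fields=title,year,externalIds&limit=100";
+  const res = await fetch(url);
+  if (!res.ok) throw new Error("HTTP " + res.status);
+  const body = await res.json();
+  // Each entry wraps the citing paper; institution names would then be
+  // extracted from the paper's front matter and geocoded downstream.
+  return body.data.map((c: any) => c.citingPaper);
+}
+</pre>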
-
- </section>
-<section class="applet_container">
-<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span>
-</div> -->
- <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
-</section><section class="applet_container">
- <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
</section><section>
<div class="hr-wave-holder">
@@ -119,38 +108,57 @@
<div class="hr-wave-line hr-wave-line2"></div>
</div>
- <h3>Supplementary Information</h3>
+ <h2>Supplementary Information</h2>
-</section><section class="applet_container">
-
- <h3>Dataset Citations</h3>
- <p>
- The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
- </p>
-
- <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
</section><section><h3>Commercial Use</h3>
<p>Usage of LFW extends far beyond academia into the research centers of the largest companies in the world, and even funnels into CIA-funded research in the United States and defense industry usage in China.</p>
-</section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file assets/lfw_commercial_use.csv", "fields": ["name_display, company_url, example_url, country, description"]}'></div></section><section><p>Research, text, and graphics ©Adam Harvey / megapixels.cc</p>
-</section><section><ul class="footnotes"><li><a name="[^lfw_www]" class="footnote_shim"></a><span class="backlinks"><a href="#[^lfw_www]_1">a</a><a href="#[^lfw_www]_2">b</a></span><p><a href="http://vis-www.cs.umass.edu/lfw/results.html">http://vis-www.cs.umass.edu/lfw/results.html</a></p>
-</li><li><a name="[^lfw_baidu]" class="footnote_shim"></a><span class="backlinks"></span><p>Jingtuo Liu, Yafeng Deng, Tao Bai, Zhengping Wei, Chang Huang. Targeting Ultimate Accuracy: Face Recognition via Deep Embedding. <a href="https://arxiv.org/abs/1506.07310">https://arxiv.org/abs/1506.07310</a></p>
-</li><li><a name="[^lfw_pingan]" class="footnote_shim"></a><span class="backlinks"><a href="#[^lfw_pingan]_1">a</a></span><p>Lee, Justin. "PING AN Tech facial recognition receives high score in latest LFW test results". BiometricUpdate.com. Feb 13, 2017. <a href="https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results">https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results</a></p>
-</li></ul></section>
+</section><section class='applet_container'><div class='applet' data-payload='{"command": "load_file assets/lfw_commercial_use.csv", "fields": ["name_display, company_url, example_url, country, description"]}'></div></section><section><h3>Research</h3>
+<ul>
+<li>"In our experiments, we used 10000 images and associated captions from the Faces in the wilddata set [3]."</li>
+<li>"This work was supported in part by the Center for Intelligent Information Retrieval, the Central Intelligence Agency, the National Security Agency and National Science Foundation under CAREER award IIS-0546666 and grant IIS-0326249."</li>
+<li>From: "People-LDA: Anchoring Topics to People using Face Recognition" <a href="https://www.semanticscholar.org/paper/People-LDA%3A-Anchoring-Topics-to-People-using-Face-Jain-Learned-Miller/10f17534dba06af1ddab96c4188a9c98a020a459">https://www.semanticscholar.org/paper/People-LDA%3A-Anchoring-Topics-to-People-using-Face-Jain-Learned-Miller/10f17534dba06af1ddab96c4188a9c98a020a459</a> and <a href="https://ieeexplore.ieee.org/document/4409055">https://ieeexplore.ieee.org/document/4409055</a></li>
+<li>This paper was presented at the IEEE 11th ICCV conference, Oct 14&ndash;21, 2007, the same year the main LFW paper "Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments" was published</li>
+<li>This research is based upon work supported in part by the Office of the Director of National Intelligence (ODNI), Intelligence Advanced Research Projects Activity (IARPA), via contract number 2014-14071600010.</li>
+<li>From "Labeled Faces in the Wild: Updates and New Reporting Procedures"</li>
+<li>70% of people in the dataset have only 1 image and 29% have 2 or more images</li>
+<li>The LFW dataset is considered the "most popular benchmark for face recognition" <a class="footnote_shim" name="[^lfw_baidu]_1"> </a><a href="#[^lfw_baidu]" class="footnote" title="Footnote 2">2</a></li>
+<li>The LFW dataset is "the most widely used evaluation set in the field of facial recognition" <a class="footnote_shim" name="[^lfw_pingan]_2"> </a><a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a></li>
+<li>All images in LFW dataset were obtained "in the wild" meaning without any consent from the subject or from the photographer</li>
+<li>The faces in the LFW dataset were detected using the Viola-Jones Haar cascade face detector, according to the LFW website and survey paper</li>
+<li>The LFW dataset is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." <a class="footnote_shim" name="[^lfw_pingan]_3"> </a><a href="#[^lfw_pingan]" class="footnote" title="Footnote 3">3</a></li>
+<li>All images in the LFW dataset were copied from Yahoo News between 2002 and 2004</li>
+<li>In 2014, two of the four original authors of the LFW dataset received funding from IARPA and ODNI for their follow-up paper <a href="https://www.semanticscholar.org/paper/Labeled-Faces-in-the-Wild-%3A-Updates-and-New-Huang-Learned-Miller/2d3482dcff69c7417c7b933f22de606a0e8e42d4">Labeled Faces in the Wild: Updates and New Reporting Procedures</a> via IARPA contract number 2014-14071600010</li>
+<li>The dataset includes 2 images of <a href="http://vis-www.cs.umass.edu/lfw/person/George_Tenet.html">George Tenet</a>, the former Director of Central Intelligence (DCI) for the Central Intelligence Agency, whose facial biometrics were eventually used to help train facial recognition software in China and Russia</li>
+<li>./15/155205b8e288fd49bf203135871d66de879c8c04/paper.txt shows usage by DSTO Australia, supported by parimal@iisc.ac.in</li>
+</ul>
+</section><section><div class='meta'><div><div class='gray'>Created</div><div>2002 &ndash; 2004</div></div><div><div class='gray'>Images</div><div>13,233</div></div><div><div class='gray'>Identities</div><div>5,749</div></div><div><div class='gray'>Origin</div><div>Yahoo! News Images</div></div><div><div class='gray'>Used by</div><div>Facebook, Google, Microsoft, Baidu, Tencent, SenseTime, Face++, CIA, NSA, IARPA</div></div><div><div class='gray'>Website</div><div><a href="http://vis-www.cs.umass.edu/lfw">umass.edu</a></div></div></div><section><section><ul>
+<li>There are about 3 men for every 1 woman in the LFW dataset<a class="footnote_shim" name="[^lfw_www]_2"> </a><a href="#[^lfw_www]" class="footnote" title="Footnote 1">1</a></li>
+<li>The person with the most images is <a href="http://vis-www.cs.umass.edu/lfw/person/George_W_Bush_comp.html">George W. Bush</a> with 530</li>
+<li>There are about 3 George W. Bushes for every 1 <a href="http://vis-www.cs.umass.edu/lfw/person/Tony_Blair.html">Tony Blair</a></li>
+<li>The LFW dataset includes over 500 actors, 30 models, 10 presidents, 124 basketball players, 24 football players, 11 kings, 7 queens, and 1 <a href="http://vis-www.cs.umass.edu/lfw/person/Moby.html">Moby</a></li>
+<li>In all 3 of the LFW publications (the original paper, the survey, and the technical report) the words "ethics", "consent", and "privacy" appear 0 times</li>
+<li>The word "future" appears 71 times</li>
+<li>* denotes partial funding for related research</li>
+</ul>
+</section><section><h3>References</h3><section><ul class="footnotes"><li><a name="[^lfw_www]" class="footnote_shim"></a><span class="backlinks"><a href="#[^lfw_www]_1">a</a><a href="#[^lfw_www]_2">b</a></span><p><a href="http://vis-www.cs.umass.edu/lfw/results.html">http://vis-www.cs.umass.edu/lfw/results.html</a></p>
+</li><li><a name="[^lfw_baidu]" class="footnote_shim"></a><span class="backlinks"><a href="#[^lfw_baidu]_1">a</a></span><p>Jingtuo Liu, Yafeng Deng, Tao Bai, Zhengping Wei, Chang Huang. Targeting Ultimate Accuracy: Face Recognition via Deep Embedding. <a href="https://arxiv.org/abs/1506.07310">https://arxiv.org/abs/1506.07310</a></p>
+</li><li><a name="[^lfw_pingan]" class="footnote_shim"></a><span class="backlinks"><a href="#[^lfw_pingan]_1">a</a><a href="#[^lfw_pingan]_2">b</a><a href="#[^lfw_pingan]_3">c</a></span><p>Lee, Justin. "PING AN Tech facial recognition receives high score in latest LFW test results". BiometricUpdate.com. Feb 13, 2017. <a href="https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results">https://www.biometricupdate.com/201702/ping-an-tech-facial-recognition-receives-high-score-in-latest-lfw-test-results</a></p>
+</li></ul></section></section>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/market_1501/index.html b/site/public/datasets/market_1501/index.html
index 1ffd7e6c..05750dc7 100644
--- a/site/public/datasets/market_1501/index.html
+++ b/site/public/datasets/market_1501/index.html
@@ -4,7 +4,7 @@
<title>MegaPixels</title>
<meta charset="utf-8" />
<meta name="author" content="Adam Harvey" />
- <meta name="description" content="Market-1501 is a dataset is collection of CCTV footage from ..." />
+ <meta name="description" content="Market-1501 is a dataset is collection of CCTV footage from Tsinghua University" />
<meta name="referrer" content="no-referrer" />
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
<link rel='stylesheet' href='/assets/css/fonts.css' />
@@ -26,8 +26,9 @@
</header>
<div class="content content-dataset">
- <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/market_1501/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'><span class="dataset-name">Market-1501</span> is a dataset is collection of CCTV footage from ...</span></div><div class='hero_subdesc'><span class='bgpad'>The Market-1501 dataset includes ...
-</span></div></div></section><section><div class='left-sidebar'><div class='meta'>
+  <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/market_1501/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'><span class="dataset-name">Market-1501</span> is a dataset of CCTV footage from Tsinghua University</span></div><div class='hero_subdesc'><span class='bgpad'>The Market-1501 dataset includes 1,261 people from 5 HD surveillance cameras located on campus
+</span></div></div></section><section><h2>Market-1501 Dataset</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
<div class='gray'>Published</div>
<div>2015</div>
</div><div class='meta'>
@@ -42,30 +43,38 @@
</div><div class='meta'>
<div class='gray'>Website</div>
<div><a href='http://www.liangzheng.org/Project/project_reid.html' target='_blank' rel='nofollow noopener'>liangzheng.org</a></div>
- </div><div class='meta'><div><div class='gray'>Collected</div><div>TBD</div></div><div><div class='gray'>Published</div><div>TBD</div></div><div><div class='gray'>Images</div><div>TBD</div></div><div><div class='gray'>Faces</div><div>TBD</div></div></div></div><h2>Market-1501 ...</h2>
-<p>(PAGE UNDER DEVELOPMENT)</p>
+  </div></div><p>[ PAGE UNDER DEVELOPMENT ]</p>
</section><section>
+ <h3>Who used Market 1501?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
<h3>Biometric Trade Routes</h3>
-<!--
- <div class="map-sidebar right-sidebar">
- <h3>Legend</h3>
- <ul>
- <li><span style="color: #f2f293">&#9632;</span> Industry</li>
- <li><span style="color: #f30000">&#9632;</span> Academic</li>
- <li><span style="color: #3264f6">&#9632;</span> Government</li>
- </ul>
- </div>
- -->
+
<p>
- To help understand how Market 1501 has been used around the world for commercial, military and academic research; publicly available research citing Market 1501 Dataset is collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal reserach projects at that location.
+    To help understand how Market 1501 has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Market-1501 dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.
</p>
</section>
<section class="applet_container fullwidth">
<div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
-
</section>
<div class="caption">
@@ -73,25 +82,12 @@
<li class="edu">Academic</li>
<li class="com">Commercial</li>
<li class="gov">Military / Government</li>
- <li class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</li>
</ul>
+ <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</div >
</div>
-<!-- <section>
- <p class='subp'>
- [section under development] Market 1501 ... Standardized paragraph of text about the map. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo.
- </p>
-</section>
- --><section>
- <div class="hr-wave-holder">
- <div class="hr-wave-line hr-wave-line1"></div>
- <div class="hr-wave-line hr-wave-line2"></div>
- </div>
-
- <h3>Supplementary Information</h3>
-
-</section><section class="applet_container">
+<section class="applet_container">
<h3>Dataset Citations</h3>
<p>
@@ -99,7 +95,7 @@
</p>
<div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
-</section><section><h2>Research Notes</h2>
+</section><section><h2>(ignore) Research Notes</h2>
<ul>
<li>"MARS is an extension of the Market-1501 dataset. During collection, we placed six near synchronized cameras in the campus of Tsinghua university. There were Five 1,080<em>1920 HD cameras and one 640</em>480 SD camera. MARS consists of 1,261 different pedestrians whom are captured by at least 2 cameras. Given a query tracklet, MARS aims to retrieve tracklets that contain the same ID." - main paper</li>
<li>bbox "0065C1T0002F0016.jpg", "0065" is the ID of the pedestrian. "C1" denotes the first
@@ -118,18 +114,17 @@ organization={Springer}
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/msceleb/index.html b/site/public/datasets/msceleb/index.html
index fb08c737..86741647 100644
--- a/site/public/datasets/msceleb/index.html
+++ b/site/public/datasets/msceleb/index.html
@@ -17,7 +17,7 @@
<a class='slogan' href="/">
<div class='logo'></div>
<div class='site_name'>MegaPixels</div>
- <div class='splash'>MsCeleb</div>
+ <div class='splash'>Microsoft Celeb</div>
</a>
<div class='links'>
<a href="/datasets/">Datasets</a>
@@ -27,7 +27,8 @@
<div class="content content-dataset">
<section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/msceleb/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>MS Celeb is a dataset of web images used for training and evaluating face recognition algorithms</span></div><div class='hero_subdesc'><span class='bgpad'>The MS Celeb dataset includes over 10,000,000 images and 93,000 identities of semi-public figures collected using the Bing search engine
-</span></div></div></section><section><div class='left-sidebar'><div class='meta'>
+</span></div></div></section><section><h2>Microsoft Celeb Dataset (MS Celeb)</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
<div class='gray'>Published</div>
<div>2016</div>
</div><div class='meta'>
@@ -48,12 +49,10 @@
</div><div class='meta'>
<div class='gray'>Website</div>
<div><a href='http://www.msceleb.org/' target='_blank' rel='nofollow noopener'>msceleb.org</a></div>
- </div><div class='meta'><div><div class='gray'>Published</div><div>TBD</div></div><div><div class='gray'>Images</div><div>TBD</div></div><div><div class='gray'>Faces</div><div>TBD</div></div><div><div class='gray'>Created by</div><div>TBD</div></div></div></div><h2>Microsoft Celeb Dataset (MS Celeb)</h2>
-<p>(PAGE UNDER DEVELOPMENT)</p>
-<p>At vero eos et accusamus et iusto odio dignissimos ducimus, qui blanditiis praesentium voluptatum deleniti atque corrupti, quos dolores et quas molestias excepturi sint, obcaecati cupiditate non-provident, similique sunt in culpa, qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio.</p>
-<p>Nam libero tempore, cum soluta nobis est eligendi optio, cumque nihil impedit, quo minus id, quod maxime placeat, facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet, ut et voluptates repudiandae sint et molestiae non-recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat</p>
+  </div></div><p>See: <a href="https://www.hrw.org/news/2019/01/15/letter-microsoft-face-surveillance-technology">Human Rights Watch, "Letter to Microsoft on Face Surveillance Technology" (Jan 15, 2019)</a></p>
+<p>See also: <a href="https://www.scmp.com/tech/science-research/article/3005733/what-you-need-know-about-sensenets-facial-recognition-firm">South China Morning Post, "What you need to know about SenseNets", on the facial recognition firm SenseNets</a></p>
</section><section>
- <h3>Who used MsCeleb?</h3>
+ <h3>Who used Microsoft Celeb?</h3>
<p>
This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
@@ -65,30 +64,24 @@
<div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
-</section><section class="applet_container">
+</section>
+
+<section class="applet_container">
<div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
-</section><section>
+</section>
+
+<section>
<h3>Biometric Trade Routes</h3>
-<!--
- <div class="map-sidebar right-sidebar">
- <h3>Legend</h3>
- <ul>
- <li><span style="color: #f2f293">&#9632;</span> Industry</li>
- <li><span style="color: #f30000">&#9632;</span> Academic</li>
- <li><span style="color: #3264f6">&#9632;</span> Government</li>
- </ul>
- </div>
- -->
+
<p>
- To help understand how MsCeleb has been used around the world for commercial, military and academic research; publicly available research citing Microsoft Celebrity Dataset is collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal reserach projects at that location.
+    To help understand how Microsoft Celeb has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Microsoft Celeb dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.
</p>
</section>
<section class="applet_container fullwidth">
<div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
-
</section>
<div class="caption">
@@ -96,26 +89,12 @@
<li class="edu">Academic</li>
<li class="com">Commercial</li>
<li class="gov">Military / Government</li>
- <li class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</li>
</ul>
+ <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</div >
</div>
-<!-- <section>
- <p class='subp'>
- [section under development] MsCeleb ... Standardized paragraph of text about the map. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo.
- </p>
-</section>
- --><section><p>Add more analysis here</p>
-</section><section>
- <div class="hr-wave-holder">
- <div class="hr-wave-line hr-wave-line1"></div>
- <div class="hr-wave-line hr-wave-line2"></div>
- </div>
-
- <h3>Supplementary Information</h3>
-
-</section><section class="applet_container">
+<section class="applet_container">
<h3>Dataset Citations</h3>
<p>
@@ -123,29 +102,37 @@
</p>
<div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
+</section><section>
+
+ <div class="hr-wave-holder">
+ <div class="hr-wave-line hr-wave-line1"></div>
+ <div class="hr-wave-line hr-wave-line2"></div>
+ </div>
+
+ <h2>Supplementary Information</h2>
+
</section><section><h3>Additional Information</h3>
<ul>
<li>The dataset author spoke about his research at the CVPR conference in 2016 <a href="https://www.youtube.com/watch?v=Nl2fBKxwusQ">https://www.youtube.com/watch?v=Nl2fBKxwusQ</a></li>
</ul>
-</section><section><ul class="footnotes"><li><a name="[^readme]" class="footnote_shim"></a><span class="backlinks"></span><p>"readme.txt" <a href="https://exhibits.stanford.edu/data/catalog/sx925dc9385">https://exhibits.stanford.edu/data/catalog/sx925dc9385</a>.</p>
+</section><section><h3>References</h3><section><ul class="footnotes"><li><a name="[^readme]" class="footnote_shim"></a><span class="backlinks"></span><p>"readme.txt" <a href="https://exhibits.stanford.edu/data/catalog/sx925dc9385">https://exhibits.stanford.edu/data/catalog/sx925dc9385</a>.</p>
</li><li><a name="[^localized_region_context]" class="footnote_shim"></a><span class="backlinks"></span><p>Li, Y. and Dou, Y. and Liu, X. and Li, T. Localized Region Context and Object Feature Fusion for People Head Detection. ICIP16 Proceedings. 2016. Pages 594-598.</p>
</li><li><a name="[^replacement_algorithm]" class="footnote_shim"></a><span class="backlinks"></span><p>Zhao. X, Wang Y, Dou, Y. A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering.</p>
-</li></ul></section>
+</li></ul></section></section>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/oxford_town_centre/index.html b/site/public/datasets/oxford_town_centre/index.html
new file mode 100644
index 00000000..03d8934b
--- /dev/null
+++ b/site/public/datasets/oxford_town_centre/index.html
@@ -0,0 +1,156 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="Oxford Town Centre is a dataset of surveillance camera footage from Cornmarket St Oxford, England" />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+ <link rel='stylesheet' href='/assets/css/leaflet.css' />
+ <link rel='stylesheet' href='/assets/css/applets.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <div class='splash'>TownCentre</div>
+ </a>
+ <div class='links'>
+ <a href="/datasets/">Datasets</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content content-dataset">
+
+  <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/oxford_town_centre/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'>Oxford Town Centre is a dataset of surveillance camera footage from Cornmarket St, Oxford, England</span></div><div class='hero_subdesc'><span class='bgpad'>The Oxford Town Centre dataset includes approximately 2,200 identities and is used for research and development of face recognition systems
+</span></div></div></section><section><h2>Oxford Town Centre</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
+ <div class='gray'>Published</div>
+ <div>2009</div>
+ </div><div class='meta'>
+ <div class='gray'>Videos</div>
+ <div>1 </div>
+ </div><div class='meta'>
+ <div class='gray'>Identities</div>
+ <div>2,200 </div>
+ </div><div class='meta'>
+ <div class='gray'>Purpose</div>
+ <div>Person detection, gaze estimation</div>
+ </div><div class='meta'>
+ <div class='gray'>Funded by</div>
+ <div>EU FP6 Hermes project and Oxford Risk </div>
+ </div><div class='meta'>
+ <div class='gray'>Download Size</div>
+ <div>0.147 GB</div>
+ </div><div class='meta'>
+ <div class='gray'>Website</div>
+ <div><a href='http://www.robots.ox.ac.uk/ActiveVision/Research/Projects/2009bbenfold_headpose/project.html' target='_blank' rel='nofollow noopener'>ox.ac.uk</a></div>
+  </div></div><p>The Oxford Town Centre dataset is a CCTV video of pedestrians in a busy downtown area in Oxford used for research and development of activity and face recognition systems.<a class="footnote_shim" name="[^ben_benfold_orig]_1"> </a><a href="#[^ben_benfold_orig]" class="footnote" title="Footnote 1">1</a> The CCTV video was obtained from a public surveillance camera at the corner of Cornmarket and Market St. in Oxford, England and includes approximately 2,200 people. Since its publication in 2009<a class="footnote_shim" name="[^guiding_surveillance]_1"> </a><a href="#[^guiding_surveillance]" class="footnote" title="Footnote 2">2</a> the Oxford Town Centre dataset has been used in over 80 verified research projects including commercial research by Amazon, Disney, OSRAM, and Huawei; and academic research in China, Israel, Russia, Singapore, the US, and Germany, among dozens of others.</p>
+<p>The Oxford Town Centre dataset is unique in that it uses footage from a public surveillance camera that would otherwise be designated for public safety. The video shows that the pedestrians behave naturally and unrehearsed, indicating that they neither knew of nor consented to participation in the research project.</p>
+</section><section>
+ <h3>Who used TownCentre?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
+
+ <h3>Biometric Trade Routes</h3>
+
+ <p>
+    To help understand how TownCentre has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Oxford Town Centre dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.
+ </p>
+
+ </section>
+
+<section class="applet_container fullwidth">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
+</section>
+
+<div class="caption">
+ <ul class="map-legend">
+ <li class="edu">Academic</li>
+ <li class="com">Commercial</li>
+ <li class="gov">Military / Government</li>
+ </ul>
+ <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</div >
+</div>
+
+
+<section class="applet_container">
+
+ <h3>Dataset Citations</h3>
+ <p>
+ The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
+ </p>
+
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
+</section><section>
+
+ <div class="hr-wave-holder">
+ <div class="hr-wave-line hr-wave-line1"></div>
+ <div class="hr-wave-line hr-wave-line2"></div>
+ </div>
+
+ <h2>Supplementary Information</h2>
+
+</section><section><h3>Location</h3>
+<p>The street location of the camera used for the Oxford Town Centre dataset was confirmed by matching the road, benches, and store signs (<a href="https://www.google.com/maps/@51.7528162,-1.2581152,3a,50.3y,310.59h,87.23t/data=!3m7!1e1!3m5!1s3FsGN-PqYC-VhQGjWgmBdQ!2e0!5s20120601T000000!7i13312!8i6656">source</a>). At that location, two public CCTV cameras are mounted on the side of the Northgate House building at 13-20 Cornmarket St. A view from a private camera in the building across the street can be ruled out because it would have to show more of the silhouette of the lower camera's mounting pole. Two options remain: either the public CCTV camera mounted to the side of the building was used, or the researchers mounted their own camera in the same location. Because the researchers used many other existing public CCTV cameras for their <a href="http://www.robots.ox.ac.uk/ActiveVision/Research/Projects/2009bbenfold_headpose/project.html">research projects</a>, it is likely that they also had access to this camera.</p>
+<p>Although this public CCTV camera is only seen pointing the other way in Google Street View imagery, at least one public photo shows the upper CCTV camera <a href="https://www.oxcivicsoc.org.uk/northgate-house-cornmarket/">pointing in the same direction</a> as in the Oxford Town Centre dataset, proving the camera can be, and has been, rotated.</p>
+<p>As for the capture date, the text on the storefront display shows a sale happening from December 2nd &ndash; 7th, indicating the capture date was between or just before those dates. The capture year is either 2007 or 2008, since prior to 2007 the Carphone Warehouse (<a href="https://www.flickr.com/photos/katieportwin/364492063/in/photolist-4meWFE-yd7rw-yd7X6-5sDHuc-yd7DN-59CpEK-5GoHAc-yd7Zh-3G2uJP-yd7US-5GomQH-4peYpq-4bAEwm-PALEr-58RkAp-5pHEkf-5v7fGq-4q1J9W-4kypQ2-5KX2Eu-yd7MV-yd7p6-4McgWb-5pJ55w-24N9gj-37u9LK-4FVcKQ-a81Enz-5qNhTG-59CrMZ-2yuwYM-5oagH5-59CdsP-4FVcKN-4PdxhC-5Lhr2j-2PAd2d-5hAwvk-zsQSG-4Cdr4F-3dUPEi-9B1RZ6-2hv5NY-4G5qwP-HCHBW-4JiuC4-4Pdr9Y-584aEV-2GYBEc-HCPkp/">photo</a>, <a href="http://www.oxfordhistory.org.uk/cornmarket/west/47_51.html">history</a>) did not exist at this location. Since the sweaters in the GAP window display are more similar to those in a <a href="https://web.archive.org/web/20081201002524/http://www.gap.com/">GAP website snapshot</a> from November 2007, our guess is that the footage was obtained during late November or early December 2007. The lack of street vendors and slight waste residue near the bench suggests that it was probably a weekday after rubbish removal.</p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/oxford_town_centre/assets/oxford_town_centre_cctv.jpg' alt=' Footage from this public CCTV camera was used to create the Oxford Town Centre dataset. Image sources: Google Street View (<a href="https://www.google.com/maps/@51.7528162,-1.2581152,3a,50.3y,310.59h,87.23t/data=!3m7!1e1!3m5!1s3FsGN-PqYC-VhQGjWgmBdQ!2e0!5s20120601T000000!7i13312!8i6656">map</a>)'><div class='caption'> Footage from this public CCTV camera was used to create the Oxford Town Centre dataset. Image sources: Google Street View (<a href="https://www.google.com/maps/@51.7528162,-1.2581152,3a,50.3y,310.59h,87.23t/data=!3m7!1e1!3m5!1s3FsGN-PqYC-VhQGjWgmBdQ!2e0!5s20120601T000000!7i13312!8i6656">map</a>)</div></div></section><section><div class='columns columns-'><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/oxford_town_centre/assets/oxford_town_centre_sal_body.jpg' alt=' Heat map body visualization of the pedestrians detected in the Oxford Town Centre dataset &copy; megapixels.cc'><div class='caption'> Heat map body visualization of the pedestrians detected in the Oxford Town Centre dataset &copy; megapixels.cc</div></div></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/oxford_town_centre/assets/oxford_town_centre_sal_face.jpg' alt=' Heat map face visualization of the pedestrians detected in the Oxford Town Centre dataset &copy; megapixels.cc'><div class='caption'> Heat map face visualization of the pedestrians detected in the Oxford Town Centre dataset &copy; megapixels.cc</div></div></section></div></section><section>
+
+ <h4>Cite Our Work</h4>
+ <p>
+
+  If you use our data, research, or graphics, please cite our work:
+
+<pre id="cite-bibtex">
+@online{megapixels,
+  author = {Harvey, Adam and LaPlace, Jules},
+ title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
+ year = 2019,
+ url = {https://megapixels.cc/},
+ urldate = {2019-04-18}
+}</pre>
+
+ </p>
+</section><section><h3>References</h3><section><ul class="footnotes"><li><a name="[^ben_benfold_orig]" class="footnote_shim"></a><span class="backlinks"><a href="#[^ben_benfold_orig]_1">a</a></span><p>Benfold, Ben and Reid, Ian. "Stable Multi-Target Tracking in Real-Time Surveillance Video". CVPR 2011. Pages 3457-3464.</p>
+</li><li><a name="[^guiding_surveillance]" class="footnote_shim"></a><span class="backlinks"><a href="#[^guiding_surveillance]_1">a</a></span><p>"Guiding Visual Surveillance by Tracking Human Attention". 2009.</p>
+</li></ul></section></section>
+
+ </div>
+ <footer>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
+ </footer>
+</body>
+
+<script src="/assets/js/dist/index.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/datasets/pipa/index.html b/site/public/datasets/pipa/index.html
index 27168c5c..ae8aef6d 100644
--- a/site/public/datasets/pipa/index.html
+++ b/site/public/datasets/pipa/index.html
@@ -4,7 +4,7 @@
<title>MegaPixels</title>
<meta charset="utf-8" />
<meta name="author" content="Adam Harvey" />
- <meta name="description" content=" is a dataset..." />
+ <meta name="description" content=" People in Photo Albums (PIPA) is a dataset..." />
<meta name="referrer" content="no-referrer" />
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
<link rel='stylesheet' href='/assets/css/fonts.css' />
@@ -17,7 +17,7 @@
<a class='slogan' href="/">
<div class='logo'></div>
<div class='site_name'>MegaPixels</div>
- <div class='splash'>PIPA</div>
+ <div class='splash'>PIPA Dataset</div>
</a>
<div class='links'>
<a href="/datasets/">Datasets</a>
@@ -26,8 +26,9 @@
</header>
<div class="content content-dataset">
- <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/pipa/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'><span class="dataset-name"> is a dataset...</span></div><div class='hero_subdesc'><span class='bgpad'>PIPA subdescription
-</span></div></div></section><section><div class='left-sidebar'><div class='meta'>
+  <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/pipa/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'><span class="dataset-name">People in Photo Albums (PIPA)</span> is a dataset...</span></div><div class='hero_subdesc'><span class='bgpad'>[ add subdescription ]
+</span></div></div></section><section><h2>People in Photo Albums</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
<div class='gray'>Published</div>
<div>2015</div>
</div><div class='meta'>
@@ -45,30 +46,38 @@
</div><div class='meta'>
<div class='gray'>Website</div>
<div><a href='https://people.eecs.berkeley.edu/~nzhang/piper.html' target='_blank' rel='nofollow noopener'>berkeley.edu</a></div>
- </div><div class='meta'><div><div class='gray'>Collected</div><div>TBD</div></div><div><div class='gray'>Published</div><div>TBD</div></div><div><div class='gray'>Images</div><div>TBD</div></div><div><div class='gray'>Faces</div><div>TBD</div></div></div></div><h2>Dataset Title TBD</h2>
-<p>(PAGE UNDER DEVELOPMENT)</p>
+ </div></div><p>[ PAGE UNDER DEVELOPMENT ]</p>
</section><section>
+ <h3>Who used PIPA Dataset?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
<h3>Biometric Trade Routes</h3>
-<!--
- <div class="map-sidebar right-sidebar">
- <h3>Legend</h3>
- <ul>
- <li><span style="color: #f2f293">&#9632;</span> Industry</li>
- <li><span style="color: #f30000">&#9632;</span> Academic</li>
- <li><span style="color: #3264f6">&#9632;</span> Government</li>
- </ul>
- </div>
- -->
+
<p>
- To help understand how PIPA Dataset has been used around the world for commercial, military and academic research; publicly available research citing People in Photo Albums Dataset is collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal reserach projects at that location.
+    To help understand how PIPA Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the People in Photo Albums dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.
</p>
</section>
<section class="applet_container fullwidth">
<div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
-
</section>
<div class="caption">
@@ -76,25 +85,12 @@
<li class="edu">Academic</li>
<li class="com">Commercial</li>
<li class="gov">Military / Government</li>
- <li class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</li>
</ul>
+ <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</div >
</div>
-<!-- <section>
- <p class='subp'>
- [section under development] PIPA Dataset ... Standardized paragraph of text about the map. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo.
- </p>
-</section>
- --><section>
- <div class="hr-wave-holder">
- <div class="hr-wave-line hr-wave-line1"></div>
- <div class="hr-wave-line hr-wave-line2"></div>
- </div>
-
- <h3>Supplementary Information</h3>
-
-</section><section class="applet_container">
+<section class="applet_container">
<h3>Dataset Citations</h3>
<p>
@@ -102,23 +98,21 @@
</p>
<div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
-</section><section><h2>Research Notes</h2>
</section>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/pubfig/index.html b/site/public/datasets/pubfig/index.html
new file mode 100644
index 00000000..ef289954
--- /dev/null
+++ b/site/public/datasets/pubfig/index.html
@@ -0,0 +1,117 @@
+<!doctype html>
+<html>
+<head>
+ <title>MegaPixels</title>
+ <meta charset="utf-8" />
+ <meta name="author" content="Adam Harvey" />
+ <meta name="description" content="PubFig is a dataset..." />
+ <meta name="referrer" content="no-referrer" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+ <link rel='stylesheet' href='/assets/css/fonts.css' />
+ <link rel='stylesheet' href='/assets/css/css.css' />
+ <link rel='stylesheet' href='/assets/css/leaflet.css' />
+ <link rel='stylesheet' href='/assets/css/applets.css' />
+</head>
+<body>
+ <header>
+ <a class='slogan' href="/">
+ <div class='logo'></div>
+ <div class='site_name'>MegaPixels</div>
+ <div class='splash'>PubFig</div>
+ </a>
+ <div class='links'>
+ <a href="/datasets/">Datasets</a>
+ <a href="/about/">About</a>
+ </div>
+ </header>
+ <div class="content content-dataset">
+
+  <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/pubfig/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'><span class="dataset-name">PubFig</span> is a dataset...</span></div><div class='hero_subdesc'><span class='bgpad'>[ add subdescription ]
+</span></div></div></section><section><h2>PubFig</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
+ <div class='gray'>Published</div>
+ <div>2009</div>
+ </div><div class='meta'>
+ <div class='gray'>Images</div>
+ <div>58,797 </div>
+ </div><div class='meta'>
+ <div class='gray'>Identities</div>
+ <div>200 </div>
+ </div><div class='meta'>
+ <div class='gray'>Purpose</div>
+    <div>Mostly names from LFW but includes new names; large variation in pose, lighting, expression, scene, camera, and imaging conditions and parameters</div>
+ </div><div class='meta'>
+ <div class='gray'>Website</div>
+ <div><a href='http://www.cs.columbia.edu/CAVE/databases/pubfig/' target='_blank' rel='nofollow noopener'>columbia.edu</a></div>
+ </div></div><p>[ PAGE UNDER DEVELOPMENT ]</p>
+</section><section>
+ <h3>Who used PubFig?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
+
+ <h3>Biometric Trade Routes</h3>
+
+ <p>
+    To help understand how PubFig has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Public Figures Face Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.
+ </p>
+
+ </section>
+
+<section class="applet_container fullwidth">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
+</section>
+
+<div class="caption">
+ <ul class="map-legend">
+ <li class="edu">Academic</li>
+ <li class="com">Commercial</li>
+ <li class="gov">Military / Government</li>
+ </ul>
+ <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</div >
+</div>
+
+
+<section class="applet_container">
+
+ <h3>Dataset Citations</h3>
+ <p>
+ The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
+ </p>
+
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
+</section>
+
+ </div>
+ <footer>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
+ </footer>
+</body>
+
+<script src="/assets/js/dist/index.js"></script>
+</html> \ No newline at end of file
diff --git a/site/public/datasets/uccs/index.html b/site/public/datasets/uccs/index.html
index 593ac498..3652e329 100644
--- a/site/public/datasets/uccs/index.html
+++ b/site/public/datasets/uccs/index.html
@@ -4,7 +4,7 @@
<title>MegaPixels</title>
<meta charset="utf-8" />
<meta name="author" content="Adam Harvey" />
- <meta name="description" content="Unconstrained College Students (UCCS) is a dataset of long-range surveillance photos of students taken without their knowledge" />
+ <meta name="description" content="UnConstrained College Students is a dataset of long-range surveillance photos of students on University of Colorado in Colorado Springs campus" />
<meta name="referrer" content="no-referrer" />
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
<link rel='stylesheet' href='/assets/css/fonts.css' />
@@ -26,10 +26,11 @@
</header>
<div class="content content-dataset">
- <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'><span class="dataset-name">Unconstrained College Students (UCCS)</span> is a dataset of long-range surveillance photos of students taken without their knowledge</span></div><div class='hero_subdesc'><span class='bgpad'>The UCCS dataset includes 16,149 images and 1,732 identities of students at University of Colorado Colorado Springs campus and is used for face recognition and face detection
-</span></div></div></section><section><div class='left-sidebar'><div class='meta'>
+  <section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'><span class="dataset-name">UnConstrained College Students</span> is a dataset of long-range surveillance photos of students on the University of Colorado Colorado Springs campus</span></div><div class='hero_subdesc'><span class='bgpad'>The UnConstrained College Students dataset includes 16,149 images of 1,732 students, faculty, and pedestrians and is used for developing face recognition and face detection algorithms
+</span></div></div></section><section><h2>UnConstrained College Students</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
<div class='gray'>Published</div>
- <div>2018</div>
+ <div>2016</div>
</div><div class='meta'>
<div class='gray'>Images</div>
<div>16,149 </div>
@@ -48,302 +49,226 @@
</div><div class='meta'>
<div class='gray'>Website</div>
<div><a href='http://vast.uccs.edu/Opensetface/' target='_blank' rel='nofollow noopener'>uccs.edu</a></div>
- </div><div class='meta'><div><div class='gray'>Published</div><div>2018</div></div><div><div class='gray'>Images</div><div>16,149</div></div><div><div class='gray'>Identities</div><div>1,732</div></div><div><div class='gray'>Used for</div><div>Face recognition, face detection</div></div><div><div class='gray'>Created by</div><div>Unviversity of Colorado Colorado Springs (US)</div></div><div><div class='gray'>Funded by</div><div>ODNI, IARPA, ONR MURI, Amry SBIR, SOCOM SBIR</div></div><div><div class='gray'>Website</div><div><a href="https://vast.uccs.edu/Opensetface/">vast.uccs.edu</a></div></div></div></div><h2>Unconstrained College Students ...</h2>
-<p>(PAGE UNDER DEVELOPMENT)</p>
-<p>Unconstrained College Students (UCCS) is a dataset of long-range surveillance photos captured at University of Colorado Colorado Springs. According to the authors of two papers associated with the dataset, subjects were "photographed using a long-range high-resolution surveillance camera without their knowledge" [^funding_sb]. The images were captured using a Canon 7D digital camera fitted with a Sigma 800mm telephoto lens pointed out the window of an office.</p>
-<p>The UCCS dataset was funded by ODNI (Office of Director of National Intelligence), IARPA (Intelligence Advance Research Projects Activity), ONR MURI Office of Naval Research and The Department of Defense Multidisciplinary University Research Initiative, Army SBIR (Small Business Innovation Research), SOCOM SBIR (Special Operations Command and Small Business Innovation Research), and the National Science Foundation.</p>
-<p>The images in UCCS include students walking between classes on campus over 19 days in 2012 - 2013. The dates include:</p>
+  </div></div><p>UnConstrained College Students (UCCS) is a dataset of long-range surveillance photos captured at the University of Colorado Colorado Springs, developed primarily to support "face detection and recognition research towards surveillance applications"<a class="footnote_shim" name="[^uccs_vast]_1"> </a><a href="#[^uccs_vast]" class="footnote" title="Footnote 1">1</a>. According to the authors of the <a href="https://www.semanticscholar.org/paper/Unconstrained-Face-Detection-and-Open-Set-Face-G%C3%BCnther-Hu/d4f1eb008eb80595bcfdac368e23ae9754e1e745">two</a> <a href="https://www.semanticscholar.org/paper/Large-scale-unconstrained-open-set-face-database-Sapkota-Boult/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1">papers</a> associated with the dataset, over 1,700 students and pedestrians were "photographed using a long-range high-resolution surveillance camera without their knowledge".<a class="footnote_shim" name="[^funding_uccs]_1"> </a><a href="#[^funding_uccs]" class="footnote" title="Footnote 3">3</a> In this investigation, we examine the contents of the <a href="http://vast.uccs.edu/Opensetface/">dataset</a>, its funding sources, photo EXIF data, and information from publicly available research project citations.</p>
+<p>The UCCS dataset includes over 1,700 unique identities, most of which are students walking to and from class. As of 2018, it was the "largest surveillance [face recognition] benchmark in the public domain."<a class="footnote_shim" name="[^surv_face_qmul]_1"> </a><a href="#[^surv_face_qmul]" class="footnote" title="Footnote 4">4</a> The photos were taken during the spring semesters of 2012 and 2013 on the West Lawn of the University of Colorado Colorado Springs campus. The photographs were timed to capture students during breaks between their scheduled classes in the morning and afternoon, Monday through Thursday. "For example, a student taking Monday-Wednesday classes at 12:30 PM will show up in the camera on almost every Monday and Wednesday."<a class="footnote_shim" name="[^sapkota_boult]_1"> </a><a href="#[^sapkota_boult]" class="footnote" title="Footnote 2">2</a></p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/uccs_map_aerial.jpg' alt=' The location at the University of Colorado Colorado Springs where students were surreptitiously photographed with a long-range surveillance camera for use in a defense and intelligence agency funded research project on face recognition. Image: Google Maps'><div class='caption'> The location at the University of Colorado Colorado Springs where students were surreptitiously photographed with a long-range surveillance camera for use in a defense and intelligence agency funded research project on face recognition. Image: Google Maps</div></div></section><section><p>The long-range surveillance images in the UnConstrained College Students dataset were taken using a Canon 7D 18-megapixel digital camera fitted with a Sigma 800mm F5.6 EX APO DG HSM telephoto lens and pointed out an office window across the university's West Lawn. The students were photographed from a distance of approximately 150 meters. "The camera [was] programmed to start capturing images at specific time intervals between classes to maximize the number of faces being captured."<a class="footnote_shim" name="[^sapkota_boult]_2"> </a><a href="#[^sapkota_boult]" class="footnote" title="Footnote 2">2</a>
+This setup made it impossible for students to know they were being photographed, providing the researchers with realistic surveillance images for building face recognition systems for real-world defense, intelligence, and commercial applications.</p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/uccs_grid.jpg' alt=' Example images from the UnConstrained College Students Dataset. '><div class='caption'> Example images from the UnConstrained College Students Dataset. </div></div></section><section><p>The EXIF data embedded in the images shows that the photo capture times follow a similar pattern to that outlined by the researchers, but also highlights that the vast majority of photos (over 7,000) were taken on Tuesdays around noon during students' lunch break. The absence of any photos taken from Friday through Sunday shows that the researchers were only interested in capturing images of students during peak campus hours.</p>
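+<p>As a minimal sketch of this tally (assuming the EXIF export linked in the Downloads section below stores capture times in a <code>DateTimeOriginal</code> column using the standard EXIF timestamp format; the column name is an assumption), the per-weekday and per-date counts can be recomputed with a few lines of Python:</p>
+<pre>
+# Sketch: tally UCCS photos per weekday and per date from the EXIF export.
+# Assumes a "DateTimeOriginal" column formatted "YYYY:MM:DD HH:MM:SS";
+# adjust the column name to match the actual CSV.
+import pandas as pd
+
+df = pd.read_csv("uccs_camera_exif.csv")
+ts = pd.to_datetime(df["DateTimeOriginal"], format="%Y:%m:%d %H:%M:%S")
+print(ts.dt.day_name().value_counts())         # photos per weekday
+print(ts.dt.date.value_counts().sort_index())  # photos per capture date
+</pre>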
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/uccs_exif_plot_days.png' alt=' UCCS photos captured per weekday &copy; megapixels.cc'><div class='caption'> UCCS photos captured per weekday &copy; megapixels.cc</div></div></section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/uccs_exif_plot.png' alt=' UCCS photos by capture time &copy; megapixels.cc'><div class='caption'> UCCS photos by capture time &copy; megapixels.cc</div></div></section><section><p>The two research papers associated with the release of the UCCS dataset (<a href="https://www.semanticscholar.org/paper/Unconstrained-Face-Detection-and-Open-Set-Face-G%C3%BCnther-Hu/d4f1eb008eb80595bcfdac368e23ae9754e1e745">Unconstrained Face Detection and Open-Set Face Recognition Challenge</a> and <a href="https://www.semanticscholar.org/paper/Large-scale-unconstrained-open-set-face-database-Sapkota-Boult/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1">Large Scale Unconstrained Open Set Face Database</a>) acknowledge that the primary funding sources for their work were United States defense and intelligence agencies. Specifically, development of the UnConstrained College Students dataset was funded by the Intelligence Advanced Research Projects Activity (IARPA), the Office of the Director of National Intelligence (ODNI), the Office of Naval Research and Department of Defense Multidisciplinary University Research Initiative (ONR MURI), and the Special Operations Command Small Business Innovation Research program (SOCOM SBIR), among others. UCCS's VAST lab also explicitly <a href="https://vast.uccs.edu/project/iarpa-janus/">states</a> its involvement in the <a href="https://www.iarpa.gov/index.php/research-programs/janus">IARPA Janus</a> face recognition project, developed to serve the needs of national intelligence, establishing that the immediate beneficiaries of this dataset include United States defense and intelligence agencies; it would go on to benefit other similar organizations.</p>
+<p>In 2017, one year after its public release, the UCCS face dataset formed the basis for a defense and intelligence agency funded <a href="http://www.face-recognition-challenge.com/">face recognition challenge</a> project at the International Joint Conference on Biometrics (IJCB) in Denver, CO. In 2018, the dataset was again used for the <a href="https://erodner.github.io/ial2018eccv/">2nd Unconstrained Face Detection and Open Set Recognition Challenge</a> at the European Conference on Computer Vision (ECCV) in Munich, Germany.</p>
+<p>As of April 15, 2019, the UCCS dataset is no longer available for public download. But during the three years it was publicly available (2016&ndash;2019), the UCCS dataset appeared in at least 6 publicly available research papers, including verified usage by Beihang University, which is known to provide research and development for China's military, and by Vision Semantics Ltd, which lists the UK Ministry of Defence as a project partner.</p>
+</section><section>
+ <h3>Who used UCCS?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span>
+</div> -->
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
+
+ <h3>Biometric Trade Routes</h3>
+
+ <p>
+    To help understand how UCCS has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the UnConstrained College Students dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.
+ </p>
+
+ </section>
+
+<section class="applet_container fullwidth">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
+</section>
+
+<div class="caption">
+ <ul class="map-legend">
+ <li class="edu">Academic</li>
+ <li class="com">Commercial</li>
+ <li class="gov">Military / Government</li>
+ </ul>
+  <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a>, then dataset usage is verified and geolocated.</div>
+</div>
+
+
+<section class="applet_container">
+
+ <h3>Dataset Citations</h3>
+ <p>
+ The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
+ </p>
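+  <p>
+  The site does not document which geocoding service performs this step; as an illustrative sketch, assuming geopy's Nominatim geocoder (the institution name below is an example):
+  </p>
+<pre>
+# Sketch: turn an institution name from a paper's front matter into
+# map coordinates using geopy's Nominatim (OpenStreetMap) geocoder.
+from geopy.geocoders import Nominatim
+
+geolocator = Nominatim(user_agent="megapixels-citations")
+loc = geolocator.geocode("University of Colorado Colorado Springs")
+if loc is not None:
+    print(loc.latitude, loc.longitude)  # coordinates for a map marker
+</pre>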
+
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
+</section><section>
+
+ <div class="hr-wave-holder">
+ <div class="hr-wave-line hr-wave-line1"></div>
+ <div class="hr-wave-line hr-wave-line2"></div>
+ </div>
+
+ <h2>Supplementary Information</h2>
+
+</section><section><p>To show the types of face images used in the UCCS student dataset while protecting the students' individual privacy, a generative adversarial network was used to interpolate between identities in the dataset. The image below shows output from a generative adversarial network trained on the UCCS face bounding-box regions: over 90,000 face regions cropped from 16,000 images.</p>
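+<p>As a minimal sketch of the interpolation technique (the tiny stand-in generator below is hypothetical so the example runs; the actual project used a GAN trained on the UCCS face regions):</p>
+<pre>
+# Sketch: blend two GAN "identities" by interpolating their latent codes.
+import torch
+
+# Hypothetical stand-in generator; a real pipeline would load a
+# pretrained GAN generator here instead.
+G = torch.nn.Sequential(torch.nn.Linear(512, 3 * 64 * 64), torch.nn.Tanh())
+
+def interpolate(G, z_a, z_b, steps=8):
+    """Return frames morphing identity A into identity B."""
+    with torch.no_grad():
+        return [G((1 - t) * z_a + t * z_b)
+                for t in torch.linspace(0.0, 1.0, steps)]
+
+z_a, z_b = torch.randn(512), torch.randn(512)  # two latent identities
+frames = interpolate(G, z_a, z_b)  # reshape each to 3x64x64 to view
+</pre>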
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/uccs_pgan_01.jpg' alt=' GAN generated approximations of students in the UCCS dataset. &copy; megapixels.cc 2018'><div class='caption'> GAN generated approximations of students in the UCCS dataset. &copy; megapixels.cc 2018</div></div></section><section><div class='columns columns-2'><div class='column'><h4>UCCS photos taken in 2012</h4>
<table>
<thead><tr>
-<th>Year</th>
-<th>Month</th>
-<th>Day</th>
<th>Date</th>
-<th>Time Range</th>
<th>Photos</th>
</tr>
</thead>
<tbody>
<tr>
-<td>2012</td>
-<td>Februay</td>
-<td>---</td>
-<td>23</td>
-<td>-</td>
+<td>Feb 23, 2012</td>
<td>132</td>
</tr>
<tr>
-<td>2012</td>
-<td>March</td>
-<td>---</td>
-<td>6</td>
-<td>-</td>
-<td>-</td>
+<td>March 6, 2012</td>
+<td>288</td>
</tr>
<tr>
-<td>2012</td>
-<td>March</td>
-<td>---</td>
-<td>8</td>
-<td>-</td>
-<td>-</td>
+<td>March 8, 2012</td>
+<td>506</td>
</tr>
<tr>
-<td>2012</td>
-<td>March</td>
-<td>---</td>
-<td>13</td>
-<td>-</td>
-<td>-</td>
+<td>March 13, 2012</td>
+<td>160</td>
</tr>
<tr>
-<td>2012</td>
-<td>Februay</td>
-<td>---</td>
-<td>23</td>
-<td>-</td>
-<td>132</td>
+<td>March 20, 2012</td>
+<td>1,840</td>
</tr>
<tr>
-<td>2012</td>
-<td>March</td>
-<td>---</td>
-<td>6</td>
-<td>-</td>
-<td>-</td>
+<td>March 22, 2012</td>
+<td>445</td>
</tr>
<tr>
-<td>2012</td>
-<td>March</td>
-<td>---</td>
-<td>8</td>
-<td>-</td>
-<td>-</td>
+<td>April 3, 2012</td>
+<td>1,639</td>
</tr>
<tr>
-<td>2012</td>
-<td>March</td>
-<td>---</td>
-<td>13</td>
-<td>-</td>
-<td>-</td>
+<td>April 12, 2012</td>
+<td>14</td>
</tr>
<tr>
-<td>2012</td>
-<td>Februay</td>
-<td>---</td>
-<td>23</td>
-<td>-</td>
-<td>132</td>
+<td>April 17, 2012</td>
+<td>19</td>
</tr>
<tr>
-<td>2012</td>
-<td>March</td>
-<td>---</td>
-<td>6</td>
-<td>-</td>
-<td>-</td>
+<td>April 24, 2012</td>
+<td>63</td>
</tr>
<tr>
-<td>2012</td>
-<td>March</td>
-<td>---</td>
-<td>8</td>
-<td>-</td>
-<td>-</td>
+<td>April 25, 2012</td>
+<td>11</td>
</tr>
<tr>
-<td>2012</td>
-<td>March</td>
-<td>---</td>
-<td>13</td>
-<td>-</td>
-<td>-</td>
+<td>April 26, 2012</td>
+<td>20</td>
</tr>
-<tr>
-<td>2012</td>
-<td>Februay</td>
-<td>---</td>
-<td>23</td>
-<td>-</td>
-<td>132</td>
+</tbody>
+</table>
+</div><div class='column'><h4>UCCS photos taken in 2013</h4>
+<table>
+<thead><tr>
+<th>Date</th>
+<th>Photos</th>
</tr>
+</thead>
+<tbody>
<tr>
-<td>2012</td>
-<td>March</td>
-<td>---</td>
-<td>6</td>
-<td>-</td>
-<td>-</td>
+<td>Jan 28, 2013</td>
+<td>1,056</td>
</tr>
<tr>
-<td>2012</td>
-<td>March</td>
-<td>---</td>
-<td>8</td>
-<td>-</td>
-<td>-</td>
+<td>Jan 29, 2013</td>
+<td>1,561</td>
</tr>
<tr>
-<td>2012</td>
-<td>March</td>
-<td>---</td>
-<td>13</td>
-<td>-</td>
-<td>-</td>
+<td>Feb 13, 2013</td>
+<td>739</td>
</tr>
<tr>
-<td>2012</td>
-<td>Februay</td>
-<td>---</td>
-<td>23</td>
-<td>-</td>
-<td>132</td>
+<td>Feb 19, 2013</td>
+<td>723</td>
</tr>
<tr>
-<td>2012</td>
-<td>March</td>
-<td>---</td>
-<td>6</td>
-<td>-</td>
-<td>-</td>
+<td>Feb 20, 2013</td>
+<td>965</td>
</tr>
<tr>
-<td>2012</td>
-<td>March</td>
-<td>---</td>
-<td>8</td>
-<td>-</td>
-<td>-</td>
+<td>Feb 26, 2013</td>
+<td>736</td>
</tr>
</tbody>
</table>
-<p>2012-03-20
-2012-03-22
-2012-04-03
-2012-04-12
-2012-04-17
-2012-04-24
-2012-04-25
-2012-04-26
-2013-01-28
-2013-01-29
-2013-02-13
-2013-02-19
-2013-02-20
-2013-02-26</p>
-</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/uccs_mean_bboxes_comp.jpg' alt=' The pixel-average of all Uconstrained College Students images is shown with all 51,838 face annotations. (c) Adam Harvey'><div class='caption'> The pixel-average of all Uconstrained College Students images is shown with all 51,838 face annotations. (c) Adam Harvey</div></div></section><section>
-
- <h3>Biometric Trade Routes</h3>
-<!--
- <div class="map-sidebar right-sidebar">
- <h3>Legend</h3>
- <ul>
- <li><span style="color: #f2f293">&#9632;</span> Industry</li>
- <li><span style="color: #f30000">&#9632;</span> Academic</li>
- <li><span style="color: #3264f6">&#9632;</span> Government</li>
- </ul>
- </div>
- -->
- <p>
- To help understand how UCCS has been used around the world for commercial, military and academic research; publicly available research citing UnConstrained College Students Dataset is collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal reserach projects at that location.
- </p>
-
- </section>
-
-<section class="applet_container fullwidth">
- <div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
-
-</section>
-
-<div class="caption">
- <ul class="map-legend">
- <li class="edu">Academic</li>
- <li class="com">Commercial</li>
- <li class="gov">Military / Government</li>
- <li class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</li>
- </ul>
-</div>
-
-<!-- <section>
- <p class='subp'>
- [section under development] UCCS ... Standardized paragraph of text about the map. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo.
- </p>
-</section>
- --><section>
- <h3>Who used UCCS?</h3>
-
- <p>
- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
- </p>
-
- </section>
-
-<section class="applet_container">
-<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span>
-</div> -->
- <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
-</section><section class="applet_container">
- <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
-</section><section class="applet_container">
-
- <h3>Dataset Citations</h3>
- <p>
- The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
- </p>
-
- <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
-</section><section>
-
- <div class="hr-wave-holder">
- <div class="hr-wave-line hr-wave-line1"></div>
- <div class="hr-wave-line hr-wave-line2"></div>
- </div>
-
- <h3>Supplementary Information</h3>
-
-</section><section><p>The original Sapkota and Boult dataset, from which UCCS is derived, received funding from<sup class="footnote-ref" id="fnref-funding_sb"><a href="#fn-funding_sb">1</a></sup>:</p>
+</div></div></section><section><h3>Location</h3>
+<p>The location of the camera and subjects was confirmed using several visual cues in the dataset images: the unique pattern of the sidewalk that is only used on the UCCS Pedestrian Spine near the West Lawn, the two UCCS sign poles with matching graphics still visible in Google Street View, the no-parking sign and the directionality of its arrow, the back of the street sign next to it, the slight bend in the sidewalk, the cars passing in the background of the image, and the far wall of the parking garage all match images in the dataset. The <a href="https://www.semanticscholar.org/paper/Large-scale-unconstrained-open-set-face-database-Sapkota-Boult/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1">original paper</a> also provides another clue: a <a href="https://www.semanticscholar.org/paper/Large-scale-unconstrained-open-set-face-database-Sapkota-Boult/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1/figure/1">picture of the camera</a> inside the office that was used to create the dataset. The window view in this image provides another match: the brick pattern on the north facade of the Kraemer Family Library and the green metal fence along the sidewalk. View the <a href="https://www.google.com/maps/place/University+of+Colorado+Colorado+Springs/@38.8934297,-104.7992445,27a,35y,258.51h,75.06t/data=!3m1!1e3!4m5!3m4!1s0x87134fa088fe399d:0x92cadf3962c058c4!8m2!3d38.8968312!4d-104.8049528">location on Google Maps</a>.</p>
+</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/uccs_map_3d.jpg' alt=' 3D view showing the angle of view of the surveillance camera used for UCCS dataset. Image: Google Maps'><div class='caption'> 3D view showing the angle of view of the surveillance camera used for UCCS dataset. Image: Google Maps</div></div></section><section><h3>Funding</h3>
+<p>The UnConstrained College Students dataset is associated with two main research papers: "Large Scale Unconstrained Open Set Face Database" and "Unconstrained Face Detection and Open-Set Face Recognition Challenge". Collectively, these papers and the creation of the dataset have received funding from the following organizations:</p>
<ul>
<li>ONR (Office of Naval Research) MURI (The Department of Defense Multidisciplinary University Research Initiative) grant N00014-08-1-0638</li>
<li>Army SBIR (Small Business Innovation Research) grant W15P7T-12-C-A210</li>
<li>SOCOM (Special Operations Command) SBIR (Small Business Innovation Research) grant H92222-07-P-0020</li>
-</ul>
-<p>The more recent UCCS version of the dataset received funding from <sup class="footnote-ref" id="fnref-funding_uccs"><a href="#fn-funding_uccs">2</a></sup>:</p>
-<ul>
<li>National Science Foundation Grant IIS-1320956</li>
<li>ODNI (Office of the Director of National Intelligence)</li>
<li>IARPA (Intelligence Advanced Research Projects Activity) R&amp;D contract 2014-14071600012</li>
</ul>
-<h3>TODO</h3>
+<h3>Opting Out</h3>
+<p>If you attended the University of Colorado Colorado Springs and were captured by the long-range surveillance camera used to create this dataset, there is unfortunately no way to be removed. The authors do not provide any option for students to opt out, nor were students informed that their images would be used for training face recognition. According to the authors, the lack of any consent or knowledge of participation is part of what gives the UnConstrained College Students dataset its value.</p>
+<h3>Ethics</h3>
<ul>
-<li>add tabulator module for dates</li>
-<li>parse dates into CSV using Python</li>
-<li>get google image showing line of sight?</li>
-<li>fix up quote/citations</li>
+<li>Please direct any questions about the ethics of the dataset to the University of Colorado Colorado Springs <a href="https://www.uccs.edu/compliance/">Ethics and Compliance Office</a>.</li>
+<li>For further technical information about the UnConstrained College Students dataset, visit the <a href="https://vast.uccs.edu/Opensetface">UCCS dataset project page</a>.</li>
</ul>
-<h3>footnotes</h3>
-<div class="footnotes">
-<hr>
-<ol><li id="fn-funding_sb"><p>Sapkota, Archana and Boult, Terrance. "Large Scale Unconstrained Open Set Face Database." 2013.<a href="#fnref-funding_sb" class="footnote">&#8617;</a></p></li>
-<li id="fn-funding_uccs"><p>Günther, M. et. al. "Unconstrained Face Detection and Open-Set Face Recognition Challenge," 2018. Arxiv 1708.02337v3.<a href="#fnref-funding_uccs" class="footnote">&#8617;</a></p></li>
-</ol>
-</div>
-</section>
+<h3>Downloads</h3>
+<ul>
+<li>Download EXIF data for UCCS photos: <a href="https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/uccs_camera_exif.csv">uccs_camera_exif.csv</a></li>
+</ul>
+</section><section>
+
+ <h4>Cite Our Work</h4>
+ <p>
+
+  If you use our data, research, or graphics, please cite our work:
+
+<pre id="cite-bibtex">
+@online{megapixels,
+  author = {Harvey, Adam and LaPlace, Jules},
+ title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
+ year = 2019,
+ url = {https://megapixels.cc/},
+ urldate = {2019-04-18}
+}</pre>
+
+ </p>
+</section><section><h3>References</h3><section><ul class="footnotes"><li><a name="[^uccs_vast]" class="footnote_shim"></a><span class="backlinks"><a href="#[^uccs_vast]_1">a</a></span><p>"2nd Unconstrained Face Detection and Open Set Recognition Challenge." <a href="https://vast.uccs.edu/Opensetface/">https://vast.uccs.edu/Opensetface/</a>. Accessed April 15, 2019.</p>
+</li><li><a name="[^sapkota_boult]" class="footnote_shim"></a><span class="backlinks"><a href="#[^sapkota_boult]_1">a</a><a href="#[^sapkota_boult]_2">b</a></span><p>Sapkota, Archana and Boult, Terrance. "Large Scale Unconstrained Open Set Face Database." 2013.</p>
+</li><li><a name="[^funding_uccs]" class="footnote_shim"></a><span class="backlinks"><a href="#[^funding_uccs]_1">a</a></span><p>Günther, M. et. al. "Unconstrained Face Detection and Open-Set Face Recognition Challenge," 2018. Arxiv 1708.02337v3.</p>
+</li><li><a name="[^surv_face_qmul]" class="footnote_shim"></a><span class="backlinks"><a href="#[^surv_face_qmul]_1">a</a></span><p>"Surveillance Face Recognition Challenge". <a href="https://www.semanticscholar.org/paper/Surveillance-Face-Recognition-Challenge-Cheng-Zhu/2306b2a8fba28539306052764a77a0d0f5d1236a">SemanticScholar</a></p>
+</li></ul></section></section>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/vgg_face2/index.html b/site/public/datasets/vgg_face2/index.html
index d5c1d98c..24ce4b2d 100644
--- a/site/public/datasets/vgg_face2/index.html
+++ b/site/public/datasets/vgg_face2/index.html
@@ -17,7 +17,7 @@
<a class='slogan' href="/">
<div class='logo'></div>
<div class='site_name'>MegaPixels</div>
-
+    <div class='splash'>VGG Face 2</div>
</a>
<div class='links'>
<a href="/datasets/">Datasets</a>
@@ -26,13 +26,83 @@
</header>
<div class="content content-">
- <section><h1>VGG Face 2</h1>
-</section><section><div class='meta'><div><div class='gray'>Years</div><div>TBD</div></div><div><div class='gray'>Images</div><div>TBD</div></div><div><div class='gray'>Identities</div><div>TBD</div></div><div><div class='gray'>Origin</div><div>TBD</div></div><div><div class='gray'>Funding</div><div>IARPA</div></div></div><section><section class='fullwidth'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/vgg_face2/assets/vgg_face2_index.gif' alt='...'><div class='caption'>...</div></div></section><section><h3>Analysis</h3>
+ <section><h2>VGG Face 2</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
+ <div class='gray'>Published</div>
+ <div>2015</div>
+ </div><div class='meta'>
+ <div class='gray'>Images</div>
+ <div>11,917 </div>
+ </div><div class='meta'>
+ <div class='gray'>Purpose</div>
+ <div>Head detection</div>
+ </div><div class='meta'>
+ <div class='gray'>Created by</div>
+ <div>Stanford University (US), Max Planck Institute for Informatics (DE)</div>
+ </div><div class='meta'>
+ <div class='gray'>Funded by</div>
+ <div>Max Planck Center for Visual Computing and Communication</div>
+ </div><div class='meta'>
+ <div class='gray'>Download Size</div>
+ <div>4.1 GB</div>
+ </div><div class='meta'>
+ <div class='gray'>Website</div>
+ <div><a href='https://purl.stanford.edu/sx925dc9385' target='_blank' rel='nofollow noopener'>stanford.edu</a></div>
+ </div></div><p>[ page under development ]</p>
+</section><section>
+  <h3>Who used VGG Face 2?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span>
+</div> -->
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
+
+ <h3>Biometric Trade Routes</h3>
+
+ <p>
+    To help understand how VGG Face 2 has been used around the world by commercial, military, and academic organizations, existing publicly available research citing VGG Face 2 was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.
+ </p>
+
+ </section>
+
+<section class="applet_container fullwidth">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
+</section>
+
+<div class="caption">
+ <ul class="map-legend">
+ <li class="edu">Academic</li>
+ <li class="com">Commercial</li>
+ <li class="gov">Military / Government</li>
+ </ul>
+  <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a>, then dataset usage is verified and geolocated.</div>
+</div>
+
+
+<section class="applet_container">
+
+ <h3>Dataset Citations</h3>
+ <p>
+ The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
+ </p>
+
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
+</section><section><h3>(ignore) research notes</h3>
<ul>
<li>The VGG Face 2 dataset includes approximately 1,331 actresses, 139 presidents, 16 wives, 3 husbands, 2 snooker players, and 1 guru</li>
-</ul>
-<h3>Names and descriptions</h3>
-<ul>
<li>The original VGGF2 name list has been updated with the results returned from the Google Knowledge Graph</li>
<li>Names with a similarity score greater than 0.75 were automatically updated. Scores were computed using <code>import difflib; seq = difflib.SequenceMatcher(a=a.lower(), b=b.lower()); score = seq.ratio()</code> (see the sketch at the end of this list)</li>
<li>The 97 names with a score of 0.75 or lower were manually reviewed; this includes name changes validated using Wikipedia.org results (such as "Bruce Jenner" to "Caitlyn Jenner"), spousal last-name changes, and discretionary changes to improve search results, such as combining nicknames with the full name when appropriate (for example, "Aleksandar Petrović" to "Aleksandar 'Aco' Petrović") and minor changes such as "Mohammad Ali" to "Muhammad Ali"</li>
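+<li>As a runnable sketch of that scoring step (the names below are illustrative; the 0.75 threshold is the one described above):
+<pre>
+import difflib
+
+def name_similarity(a: str, b: str) -> float:
+    """Case-insensitive similarity ratio between two name strings."""
+    return difflib.SequenceMatcher(a=a.lower(), b=b.lower()).ratio()
+
+score = name_similarity("Mohammad Ali", "Muhammad Ali")
+auto_update = score > 0.75  # at or below 0.75 -> manual review
+print(round(score, 3), auto_update)
+</pre>
+</li>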
@@ -54,18 +124,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/viper/index.html b/site/public/datasets/viper/index.html
index 6d27b15b..e4b2a05a 100644
--- a/site/public/datasets/viper/index.html
+++ b/site/public/datasets/viper/index.html
@@ -27,7 +27,8 @@
<div class="content content-dataset">
<section class='intro_section' style='background-image: url(https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/viper/assets/background.jpg)'><div class='inner'><div class='hero_desc'><span class='bgpad'><span class="dataset-name">VIPeR</span> is a person re-identification dataset of images captured at UC Santa Cruz in 2007</span></div><div class='hero_subdesc'><span class='bgpad'>VIPeR contains 1,264 images of 632 persons captured on the UC Santa Cruz campus and is used to train person re-identification algorithms for surveillance
-</span></div></div></section><section><div class='left-sidebar'><div class='meta'>
+</span></div></div></section><section><h2>VIPeR Dataset</h2>
+</section><section><div class='right-sidebar'><div class='meta'>
<div class='gray'>Published</div>
<div>2007</div>
</div><div class='meta'>
@@ -45,8 +46,7 @@
</div><div class='meta'>
<div class='gray'>Website</div>
<div><a href='https://vision.soe.ucsc.edu/node/178' target='_blank' rel='nofollow noopener'>ucsc.edu</a></div>
- </div><div class='meta'><div><div class='gray'>Published</div><div>2007</div></div><div><div class='gray'>Images</div><div>1,264</div></div><div><div class='gray'>Persons</div><div>632</div></div><div><div class='gray'>Created by</div><div>UC Santa Cruz</div></div></div></div><h2>VIPeR Dataset</h2>
-<p>(PAGE UNDER DEVELOPMENT)</p>
+ </div></div><p>[ page under development ]</p>
<p><em>VIPeR (Viewpoint Invariant Pedestrian Recognition)</em> is a dataset of pedestrian images captured at the University of California Santa Cruz in 2007. According to the researchers, two "cameras were placed in different locations in an academic setting and subjects were notified of the presence of cameras, but were not coached or instructed in any way."</p>
<p>VIPeR is among the most widely used publicly available person re-identification datasets. In 2017 the VIPeR dataset was combined into a larger person re-identification dataset created by the Chinese University of Hong Kong called PETA (PEdesTrian Attribute).</p>
</section><section>
@@ -62,30 +62,24 @@
<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span>
</div> -->
<div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
-</section><section class="applet_container">
+</section>
+
+<section class="applet_container">
<div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
-</section><section>
+</section>
+
+<section>
<h3>Biometric Trade Routes</h3>
-<!--
- <div class="map-sidebar right-sidebar">
- <h3>Legend</h3>
- <ul>
- <li><span style="color: #f2f293">&#9632;</span> Industry</li>
- <li><span style="color: #f30000">&#9632;</span> Academic</li>
- <li><span style="color: #3264f6">&#9632;</span> Government</li>
- </ul>
- </div>
- -->
+
<p>
- To help understand how VIPeR has been used around the world for commercial, military and academic research; publicly available research citing Viewpoint Invariant Pedestrian Recognition is collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal reserach projects at that location.
+    To help understand how VIPeR has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Viewpoint Invariant Pedestrian Recognition was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.
</p>
</section>
<section class="applet_container fullwidth">
<div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
-
</section>
<div class="caption">
@@ -93,25 +87,12 @@
<li class="edu">Academic</li>
<li class="com">Commercial</li>
<li class="gov">Military / Government</li>
- <li class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a> then dataset usage verified and geolocated.</li>
</ul>
+  <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a>, then dataset usage is verified and geolocated.</div>
</div>
-<!-- <section>
- <p class='subp'>
- [section under development] VIPeR ... Standardized paragraph of text about the map. Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo.
- </p>
-</section>
- --><section>
-
- <div class="hr-wave-holder">
- <div class="hr-wave-line hr-wave-line1"></div>
- <div class="hr-wave-line hr-wave-line2"></div>
- </div>
- <h3>Supplementary Information</h3>
-
-</section><section class="applet_container">
+<section class="applet_container">
<h3>Dataset Citations</h3>
<p>
@@ -123,18 +104,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/datasets/youtube_celebrities/index.html b/site/public/datasets/youtube_celebrities/index.html
index ee11f3c0..e90b45cb 100644
--- a/site/public/datasets/youtube_celebrities/index.html
+++ b/site/public/datasets/youtube_celebrities/index.html
@@ -26,18 +26,68 @@
</header>
<div class="content content-">
- <section><h1>YouTube Celebrities</h1>
-</section><section><div class='meta'><div><div class='gray'>Years</div><div>TBD</div></div><div><div class='gray'>Images</div><div>TBD</div></div><div><div class='gray'>Identities</div><div>TBD</div></div><div><div class='gray'>Origin</div><div>YouTube.com</div></div><div><div class='gray'>Funded by</div><div>CIA, US Army</div></div></div><section><section><p>TODO</p>
-<p>RESEARCH below these lines</p>
-<blockquote><p>Selected dataset sequences: (a) MBGC, (b) CMU MoBo, (c) First
-Honda/UCSD, and (d) YouTube Celebrities.
-This research is supported by the Central Intelligence Agency, the Biometrics
+ <section><h2>YouTube Celebrities</h2>
+</section><section><div class='right-sidebar'></div><p>[ page under development ]</p>
+</section><section>
+ <h3>Who used YouTube Celebrities?</h3>
+
+ <p>
+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
+ </p>
+
+ </section>
+
+<section class="applet_container">
+<!-- <div style="position: absolute;top: 0px;right: -55px;width: 180px;font-size: 14px;">Labeled Faces in the Wild Dataset<br><span class="numc" style="font-size: 11px;">20 citations</span>
+</div> -->
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;chart&quot;}"></div>
+</section>
+
+<section class="applet_container">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;piechart&quot;}"></div>
+</section>
+
+<section>
+
+ <h3>Biometric Trade Routes</h3>
+
+ <p>
+    To help understand how YouTube Celebrities has been used around the world by commercial, military, and academic organizations, existing publicly available research citing YouTube Celebrities was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.
+ </p>
+
+ </section>
+
+<section class="applet_container fullwidth">
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;map&quot;}"></div>
+</section>
+
+<div class="caption">
+ <ul class="map-legend">
+ <li class="edu">Academic</li>
+ <li class="com">Commercial</li>
+ <li class="gov">Military / Government</li>
+ </ul>
+  <div class="source">Citation data is collected using <a href="https://semanticscholar.org" target="_blank">SemanticScholar.org</a>, then dataset usage is verified and geolocated.</div>
+</div>
+
+
+<section class="applet_container">
+
+ <h3>Dataset Citations</h3>
+ <p>
+ The dataset citations used in the visualizations were collected from <a href="https://www.semanticscholar.org">Semantic Scholar</a>, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.
+ </p>
+
+ <div class="applet" data-payload="{&quot;command&quot;: &quot;citations&quot;}"></div>
+</section><section><h4>Notes...</h4>
+<ul>
+<li>Selected dataset sequences: (a) MBGC, (b) CMU MoBo, (c) First
+Honda/UCSD, and (d) YouTube Celebrities.</li>
+<li>This research is supported by the Central Intelligence Agency, the Biometrics
Task Force and the Technical Support Working Group through US Army contract
W91CRB-08-C-0093. The opinions, findings, and conclusions or recommendations
expressed in this publication are those of the authors and do not necessarily reflect
-the views of our sponsors.</p>
-</blockquote>
-<ul>
+the views of our sponsors.</li>
<li>in "Face Recognition From Video Draft 17"</li>
<li>International Journal of Pattern Recognition and Artificial Intelligence, World Scientific Publishing Company</li>
</ul>
@@ -45,18 +95,17 @@ the views of our sponsors.</p>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/index.html b/site/public/index.html
index 11ff7279..118814be 100644
--- a/site/public/index.html
+++ b/site/public/index.html
@@ -27,7 +27,7 @@
</div>
<footer>
<div>
- MegaPixels is an art and research project by Adam Harvey about the origins and ethics of facial analysis datasets, developed in partnership with Mozilla.
+ MegaPixels is a research project by Adam Harvey about facial recognition datasets, developed in partnership with Mozilla.
</div>
<div>
MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
diff --git a/site/public/info/index.html b/site/public/info/index.html
index eb78b260..7e7ecf80 100644
--- a/site/public/info/index.html
+++ b/site/public/info/index.html
@@ -32,18 +32,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/research/00_introduction/index.html b/site/public/research/00_introduction/index.html
index 43f95d2a..535958cc 100644
--- a/site/public/research/00_introduction/index.html
+++ b/site/public/research/00_introduction/index.html
@@ -41,7 +41,12 @@
</div>
</section>
- <section><div class='meta'><div><div class='gray'>Posted</div><div>Dec. 15</div></div><div><div class='gray'>Author</div><div>Adam Harvey</div></div></div><section><section><h3>Motivation</h3>
+ <section><div class='meta'><div><div class='gray'>Posted</div><div>Dec. 15</div></div><div><div class='gray'>Author</div><div>Adam Harvey</div></div></div><section><section><p>Facial recognition is a scam.</p>
+<p>During the last 20 years, commercial, academic, and governmental agencies have promoted the false dream of a future with face recognition. This essay debunks the popular myth that such a thing ever existed.</p>
+<p>There is no such thing as <em>face recognition</em>. For the last 20 years, government agencies, commercial organizations, and academic institutions have played the public for a fool, selling a roadmap of a future that simply does not exist. Facial recognition, as it is currently defined, promoted, and sold to the public, government, and commercial sectors, is a scam.</p>
+<p>Committed to developing robust solutions with superhuman accuracy, the industry has repeatedly undermined itself by never actually developing anything close to "face recognition".</p>
+<p>There is only biased feature vector clustering and probabilistic thresholding.</p>
+<h3>Motivation</h3>
<p>Ever since government agencies began developing face recognition in the early 1960's, datasets of face images have always been central to developing and validating face recognition technologies. Today, these datasets no longer originate in labs, but instead from family photo albums posted on photo sharing sites, surveillance camera footage from college campuses, search engine queries for celebrities, cafe livestreams, or <a href="https://www.theverge.com/2017/8/22/16180080/transgender-youtubers-ai-facial-recognition-dataset">videos on YouTube</a>.</p>
<p>During the last year, hundreds of these facial analysis datasets created "in the wild" have been collected to understand how they contribute to a global supply chain of biometric data that is powering the global facial recognition industry.</p>
<p>While many of these datasets include public figures such as politicians, athletes, and actors; they also include many non-public figures: digital activists, students, pedestrians, and semi-private shared photo albums are all considered "in the wild" and fair game for research projects. Some images are used with creative commons licenses, yet others were taken in unconstrained scenarios without awareness or consent. At first glance it appears many of the datasets were created for seemingly harmless academic research, but when examined further it becomes clear that they're also used by foreign defense agencies.</p>
@@ -78,18 +83,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/research/01_from_1_to_100_pixels/index.html b/site/public/research/01_from_1_to_100_pixels/index.html
index ea296960..fe49e998 100644
--- a/site/public/research/01_from_1_to_100_pixels/index.html
+++ b/site/public/research/01_from_1_to_100_pixels/index.html
@@ -121,18 +121,17 @@ relying on FaceID and TouchID to protect their information agree to a</p>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/research/02_what_computers_can_see/index.html b/site/public/research/02_what_computers_can_see/index.html
index 23641328..d139e83e 100644
--- a/site/public/research/02_what_computers_can_see/index.html
+++ b/site/public/research/02_what_computers_can_see/index.html
@@ -292,18 +292,17 @@ Head top</p>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/research/index.html b/site/public/research/index.html
index d3ed6ef3..0386fa99 100644
--- a/site/public/research/index.html
+++ b/site/public/research/index.html
@@ -31,18 +31,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/test/chart/index.html b/site/public/test/chart/index.html
index 53f41d6a..05081cf5 100644
--- a/site/public/test/chart/index.html
+++ b/site/public/test/chart/index.html
@@ -32,18 +32,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/test/citations/index.html b/site/public/test/citations/index.html
index e7140177..36021752 100644
--- a/site/public/test/citations/index.html
+++ b/site/public/test/citations/index.html
@@ -32,18 +32,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/test/csv/index.html b/site/public/test/csv/index.html
index c47bcd57..301ed718 100644
--- a/site/public/test/csv/index.html
+++ b/site/public/test/csv/index.html
@@ -32,18 +32,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/test/datasets/index.html b/site/public/test/datasets/index.html
index 61e4ef86..58555895 100644
--- a/site/public/test/datasets/index.html
+++ b/site/public/test/datasets/index.html
@@ -32,18 +32,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/test/face_search/index.html b/site/public/test/face_search/index.html
index cad7ceec..e2db70df 100644
--- a/site/public/test/face_search/index.html
+++ b/site/public/test/face_search/index.html
@@ -32,18 +32,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/test/gallery/index.html b/site/public/test/gallery/index.html
index 7dbe020b..869c3aaa 100644
--- a/site/public/test/gallery/index.html
+++ b/site/public/test/gallery/index.html
@@ -30,22 +30,37 @@
<h3><a href="/test/">&larr; Back to test index</a></h3>
</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/man.jpg' alt='Modal image 1'><div class='caption'>Modal image 1</div></div>
<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/man.jpg' alt='Modal image 2'><div class='caption'>Modal image 2</div></div>
-<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/man.jpg' alt='Modal image 3'><div class='caption'>Modal image 3</div></div></section>
+<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/man.jpg' alt='Modal image 3'><div class='caption'>Modal image 3</div></div></section><section><h2>Test table</h2>
+<table>
+<thead><tr>
+<th>Col1</th>
+<th>Col2</th>
+<th>Col3</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>Content1</td>
+<td>Content2</td>
+<td>Content3</td>
+</tr>
+</tbody>
+</table>
+</section>
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/test/index.html b/site/public/test/index.html
index 730d10c9..9c15d431 100644
--- a/site/public/test/index.html
+++ b/site/public/test/index.html
@@ -43,18 +43,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/test/map/index.html b/site/public/test/map/index.html
index 19c09314..ba2756ae 100644
--- a/site/public/test/map/index.html
+++ b/site/public/test/map/index.html
@@ -32,18 +32,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/test/name_search/index.html b/site/public/test/name_search/index.html
index f14f6f83..c956ff0b 100644
--- a/site/public/test/name_search/index.html
+++ b/site/public/test/name_search/index.html
@@ -32,18 +32,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
diff --git a/site/public/test/pie_chart/index.html b/site/public/test/pie_chart/index.html
index a3167090..2e3ba39c 100644
--- a/site/public/test/pie_chart/index.html
+++ b/site/public/test/pie_chart/index.html
@@ -32,18 +32,17 @@
</div>
<footer>
- <div>
- <a href="/">MegaPixels.cc</a>
- <a href="/about/disclaimer/">Disclaimer</a>
- <a href="/about/terms/">Terms of Use</a>
- <a href="/about/privacy/">Privacy</a>
- <a href="/about/">About</a>
- <a href="/about/team/">Team</a>
- </div>
- <div>
- MegaPixels &copy;2017-19 Adam R. Harvey /&nbsp;
- <a href="https://ahprojects.com">ahprojects.com</a>
- </div>
+ <ul class="footer-left">
+ <li><a href="/">MegaPixels.cc</a></li>
+ <li><a href="/datasets/">Datasets</a></li>
+ <li><a href="/about/">About</a></li>
+ <li><a href="/about/press/">Press</a></li>
+ <li><a href="/about/legal/">Legal and Privacy</a></li>
+ </ul>
+ <ul class="footer-right">
+ <li>MegaPixels &copy;2017-19 &nbsp;<a href="https://ahprojects.com">Adam R. Harvey</a></li>
+ <li>Made with support from &nbsp;<a href="https://mozilla.org">Mozilla</a></li>
+ </ul>
</footer>
</body>
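
The hunks above consistently swap two plain footer <div>s for a pair of semantic <ul> elements with the classes footer-left and footer-right. The stylesheet that lays these out is not part of this diff, so the following is only a minimal sketch of how such classes are commonly styled; the class names come from the markup above, but the flex layout and every property value here are assumptions, not the project's actual CSS.

footer {
  display: flex;                  /* assumption: two columns via flexbox; not shown in this diff */
  justify-content: space-between; /* push footer-left and footer-right apart */
}
footer ul.footer-left,
footer ul.footer-right {
  list-style: none;               /* suppress bullets on the nav lists */
  margin: 0;
  padding: 0;
}
footer ul.footer-right {
  text-align: right;              /* assumption: right-hand column is right-aligned */
}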