From cba3a913fb63a8b97d25f8c5c40274897d290d4e Mon Sep 17 00:00:00 2001 From: adamhrv Date: Thu, 18 Apr 2019 23:41:55 +0200 Subject: update msceleb --- site/content/pages/datasets/msceleb/index.md | 94 ++++++------- site/public/about/assets/LICENSE/index.html | 22 +-- site/public/about/attribution/index.html | 22 +-- site/public/about/index.html | 42 +++--- site/public/about/legal/index.html | 22 +-- site/public/about/press/index.html | 22 +-- .../datasets/50_people_one_question/index.html | 24 ++-- site/public/datasets/afad/index.html | 24 ++-- site/public/datasets/brainwash/index.html | 24 ++-- site/public/datasets/caltech_10k/index.html | 24 ++-- site/public/datasets/celeba/index.html | 24 ++-- site/public/datasets/cofw/index.html | 26 ++-- site/public/datasets/duke_mtmc/index.html | 24 ++-- site/public/datasets/feret/index.html | 24 ++-- site/public/datasets/hrt_transgender/index.html | 22 +-- site/public/datasets/index.html | 147 +++++++++++++++++++++ site/public/datasets/lfpw/index.html | 24 ++-- site/public/datasets/lfw/index.html | 24 ++-- site/public/datasets/market_1501/index.html | 24 ++-- site/public/datasets/msceleb/index.html | 28 ++-- site/public/datasets/oxford_town_centre/index.html | 24 ++-- site/public/datasets/pipa/index.html | 24 ++-- site/public/datasets/pubfig/index.html | 24 ++-- site/public/datasets/uccs/index.html | 24 ++-- site/public/datasets/vgg_face2/index.html | 24 ++-- site/public/datasets/viper/index.html | 24 ++-- .../public/datasets/youtube_celebrities/index.html | 24 ++-- site/public/info/index.html | 22 +-- site/public/research/00_introduction/index.html | 27 ++-- .../research/01_from_1_to_100_pixels/index.html | 22 +-- .../research/02_what_computers_can_see/index.html | 22 +-- site/public/research/index.html | 22 +-- site/public/test/chart/index.html | 22 +-- site/public/test/citations/index.html | 22 +-- site/public/test/csv/index.html | 22 +-- site/public/test/datasets/index.html | 22 +-- site/public/test/face_search/index.html | 22 +-- site/public/test/gallery/index.html | 22 +-- site/public/test/index.html | 22 +-- site/public/test/map/index.html | 22 +-- site/public/test/name_search/index.html | 22 +-- site/public/test/pie_chart/index.html | 22 +-- 42 files changed, 664 insertions(+), 526 deletions(-) create mode 100644 site/public/datasets/index.html diff --git a/site/content/pages/datasets/msceleb/index.md b/site/content/pages/datasets/msceleb/index.md index 0c78e094..fac40bc2 100644 --- a/site/content/pages/datasets/msceleb/index.md +++ b/site/content/pages/datasets/msceleb/index.md @@ -2,7 +2,7 @@ status: published title: Microsoft Celeb -desc: Microsoft Celeb 1M is a target list and dataset of web images used for research and development of face recognition technologies +desc: Microsoft Celeb 1M is a target list and dataset of web images used for research and development of face recognition subdesc: The MS Celeb dataset includes over 10 million images of about 100K people and a target list of 1 million individuals slug: msceleb cssclass: dataset @@ -19,73 +19,65 @@ authors: Adam Harvey ### sidebar ### end sidebar -Microsoft Celeb (MS Celeb) is a dataset of 10 million face images scraped from the Internet and used for research and development of large-scale biometric recognition systems. According to Microsoft Research who created and published the [dataset](http://msceleb.org) in 2016, MS Celeb is the largest publicly available face recognition dataset in the world, containing over 10 million images of nearly 100,000 individuals. 
Microsoft's goal in building this dataset was to distribute the initial training dataset of 100,000 individuals images and use this to accelerate reserch into recognizing a target list of one million individuals from their face images "using all the possibly collected face images of this individual on the web as training data".[^msceleb_orig]
+Microsoft Celeb (MS Celeb) is a dataset of 10 million face images scraped from the Internet and used for research and development of large-scale biometric recognition systems. According to Microsoft Research, which created and published the [dataset](https://www.microsoft.com/en-us/research/publication/ms-celeb-1m-dataset-benchmark-large-scale-face-recognition-2/) in 2016, MS Celeb is the largest publicly available face recognition dataset in the world, containing over 10 million images of nearly 100,000 individuals. Microsoft's goal in building this dataset was to distribute an initial training dataset of 100,000 individuals' images and use it to accelerate research into recognizing a target list of one million individuals from their face images "using all the possibly collected face images of this individual on the web as training data".[^msceleb_orig]

-These one million people, defined as Micrsoft Research as "celebrities", are often merely people who must maintain an online presence for their professional lives. Microsoft's list of 1 million people is an expansive exploitation of the current reality that for many people including academics, policy makers, writers, artists, and especially journalists maintaining an online presence is mandatory and should not allow Microsoft (or anyone else) to use their biometrics for reserach and development of surveillance technology. Many of names in target list even include people critical of the very technology Microsoft is using their name and biometric information to build. The list includes digital rights activists like Jillian York and [add more]; artists critical of surveillance including Trevor Paglen, Hito Steryl, Kyle McDonald, Jill Magid, and Aram Bartholl; Intercept founders Laura Poitras, Jeremy Scahill, and Glen Greenwald; Data and Society founder danah boyd; and even Julie Brill the former FTC commissioner responsible for protecting consumer’s privacy to name a few.

+These one million people, defined by Microsoft Research as "celebrities", are often merely people who must maintain an online presence for their professional lives. Microsoft's list of 1 million people exploits the current reality that for many people, including academics, policy makers, writers, artists, and especially journalists, maintaining an online presence is mandatory; that necessity should not allow Microsoft or anyone else to use their biometrics for research and development of surveillance technology. Many of the names in the target list are people critical of the very technology that Microsoft is using their names and biometric information to build. The list includes digital rights activists like Jillian York and [add more]; artists critical of surveillance including Trevor Paglen, Hito Steyerl, Jill Magid, and Aram Bartholl; Intercept founders Laura Poitras, Jeremy Scahill, and Glenn Greenwald; Data and Society founder danah boyd; and even Julie Brill, the former FTC commissioner responsible for protecting consumers’ privacy, to name a few.

### Microsoft's 1 Million Target List

-Below is a list of names that were included in list of 1 million individuals curated to illustrate Microsoft's expansive and exploitative practice of scraping the Internet for biometric training data. The entire name file can be downloaded from [msceleb.org](https://msceleb.org). Names appearing with * indicate that Microsoft also distributed imaged.
-
-[ cleaning this up ]
+Below is a list of names that were included in the list of 1 million individuals, curated here to illustrate Microsoft's expansive and exploitative practice of scraping the Internet for biometric training data. The entire name file can be downloaded from [msceleb.org](https://msceleb.org). Email msceleb@microsoft.com to have your name removed. Names appearing with * indicate that Microsoft also distributed images.

=== columns 2

-| Name | ID | Profession | Images |
-| --- | --- | --- | --- |
-| Jeremy Scahill | /m/02p_8_n | Journalist | x |
-| Jillian York | /m/0g9_3c3 | Digital rights activist | x |
-| Astra Taylor | /m/05f6_39 | Author, activist | x |
-| Jonathan Zittrain | /m/01f75c | EFF board member | no |
-| Julie Brill | x | x | x |
-| Jonathan Zittrain | x | x | x |
-| Bruce Schneier | m.095js | Cryptologist and author | yes |
-| Julie Brill | m.0bs3s9g | x | x |
-| Kim Zetter | /m/09r4j3 | x | x |
-| Ethan Zuckerman | x | x | x |
-| Jill Magid | x | x | x |
-| Kyle McDonald | x | x | x |
-| Trevor Paglen | x | x | x |
-| R. Luke DuBois | x | x | x |
-
-====
-
-| Name | ID | Profession | Images |
-| --- | --- | --- | -- |
-| Trevor Paglen | x | x | x |
-| Ai Weiwei | /m/0278dyq | x | x |
-| Jer Thorp | /m/01h8lg | x | x |
-| Edward Felten | /m/028_7k | x | x |
-| Evgeny Morozov | /m/05sxhgd | Scholar and technology critic | yes |
-| danah boyd | /m/06zmx5 | Data and Society founder | x |
-| Bruce Schneier | x | x | x |
-| Laura Poitras | x | x | x |
-| Trevor Paglen | x | x | x |
-| Astra Taylor | x | x | x |
-| Shoshanaa Zuboff | x | x | x |
-| Eyal Weizman | m.0g54526 | x | x |
-| Aram Bartholl | m.06_wjyc | x | x |
-| James Risen | m.09pk6b | x | x |
+| Name | Profession |
+| --- | --- |
+| Adrian Chen | Journalist |
+| Ai Weiwei* | Artist |
+| Aram Bartholl | Internet artist |
+| Astra Taylor | Author, director, activist |
+| Alexander Madrigal | Journalist |
+| Bruce Schneier* | Cryptologist |
+| danah boyd | Data & Society founder |
+| Edward Felten | Former FTC Chief Technologist |
+| Evgeny Morozov* | Tech writer, researcher |
+| Glenn Greenwald* | Journalist, author |
+| Hito Steyerl | Artist, writer |
+
+===
+
+| Name | Profession |
+| --- | --- |
+| James Risen | Journalist |
+| Jeremy Scahill* | Journalist |
+| Jill Magid | Artist |
+| Jillian York | Digital rights activist |
+| Jonathan Zittrain | EFF board member |
+| Julie Brill | Former FTC Commissioner |
+| Kim Zetter | Journalist, author |
+| Laura Poitras* | Filmmaker |
+| Luke DuBois | Artist |
+| Shoshana Zuboff | Author, academic |
+| Trevor Paglen | Artist, researcher |

=== end columns

-After publishing this list, researchers from Microsoft Asia then worked with researchers affilliated with China's National University of Defense Technology (controlled by China's Central Military Commission) and used the the MS Celeb dataset for their [research paper](https://www.semanticscholar.org/paper/Faces-as-Lighting-Probes-via-Unsupervised-Deep-Yi-Zhu/b301fd2fc33f24d6f75224e7c0991f4f04b64a65) on using "Faces as Lighting Probes via Unsupervised Deep Highlight Extraction" with potential applications in 3D face
recognition.
+After publishing this list, researchers from Microsoft Asia then worked with researchers affiliated with China's National University of Defense Technology (controlled by China's Central Military Commission) and used the MS Celeb dataset for their [research paper](https://www.semanticscholar.org/paper/Faces-as-Lighting-Probes-via-Unsupervised-Deep-Yi-Zhu/b301fd2fc33f24d6f75224e7c0991f4f04b64a65) on using "Faces as Lighting Probes via Unsupervised Deep Highlight Extraction" with potential applications in 3D face recognition.

-In an article published by the Financial Times based on data discovered during this investigation, Samm Sacks (senior fellow at New American and China tech policy expert) commented that this research raised "red flags because of the nature of the technology, the authors affilliations, combined with the what we know about how this technology is being deployed in China right now".[^madhu_ft]
+In an [article](https://www.ft.com/content/9378e7ee-5ae6-11e9-9dde-7aedca0a081a) published by the Financial Times based on data surfaced during this investigation, Samm Sacks (a senior fellow at the New America think tank) commented that this research raised "red flags because of the nature of the technology, the authors' affiliations, combined with what we know about how this technology is being deployed in China right now", adding that "the [Chinese] government is using these technologies to build surveillance systems and to detain minorities [in Xinjiang]".[^madhu_ft]

-Four more papers published by SenseTime which also use the MS Celeb dataset raise similar flags. SenseTime is Beijing based company providing surveillance to Chinese authorities including [ add context here ] has been [flagged](https://www.nytimes.com/2019/04/14/technology/china-surveillance-artificial-intelligence-racial-profiling.html) as complicity in potential human rights violations.
+Four more papers published by SenseTime that also use the MS Celeb dataset raise similar flags. SenseTime is a computer vision surveillance company that, until [April 2019](https://uhrp.org/news-commentary/china%E2%80%99s-sensetime-sells-out-xinjiang-security-joint-venture), provided surveillance to Chinese authorities to monitor and track Uighur Muslims in Xinjiang province and has been [flagged](https://www.nytimes.com/2019/04/14/technology/china-surveillance-artificial-intelligence-racial-profiling.html) numerous times as having potential links to human rights violations.

-One of the 4 SenseTime papers, "Exploring Disentangled Feature Representation Beyond Face Identification", shows how SenseTime is developing automated face analysis technology to infer race, narrow eyes, nose size, and chin size, all of which could be used to target vulnerable ethnic groups based on their facial appearances.[^disentangled]
+One of the four SenseTime papers, "[Exploring Disentangled Feature Representation Beyond Face Identification](https://www.semanticscholar.org/paper/Exploring-Disentangled-Feature-Representation-Face-Liu-Wei/1fd5d08394a3278ef0a89639e9bfec7cb482e0bf)", shows how SenseTime was developing automated face analysis technology to infer race, narrow eyes, nose size, and chin size, all of which could be used to target vulnerable ethnic groups based on their facial appearances.
Earlier in 2019, Microsoft President [Brad Smith](https://blogs.microsoft.com/on-the-issues/2018/12/06/facial-recognition-its-time-for-action/) called for government regulation of face recognition, citing the potential for misuse, a rare admission that Microsoft's surveillance-driven business model had lost its bearing. More recently Smith also [announced](https://www.reuters.com/article/us-microsoft-ai/microsoft-turned-down-facial-recognition-sales-on-human-rights-concerns-idUSKCN1RS2FV) that Microsoft would seemingly take a stand against potential misuse and decided not to sell face recognition to an unnamed United States law enforcement agency, citing that their technology was not accurate enough to be used on minorities because it was trained mostly on white male faces.

-What the decision to block the sale announces is not so much that Microsoft has upgraded their ethics, but that it publicly acknolwedged it can't sell a data-driven product without data. Microsoft can't sell face recognition for faces they can't train on.
+What the decision to block the sale announces is not so much that Microsoft had upgraded their ethics, but that Microsoft publicly acknowledged it can't sell a data-driven product without data. In other words, Microsoft can't sell face recognition for faces they can't train on.

-Until now, that data has been freely harvested from the Internet and packaged in training sets like MS Celeb, which are overwhelmingly [white](https://www.nytimes.com/2018/02/09/technology/facial-recognition-race-artificial-intelligence.html) and [male](https://gendershades.org). Without balanced data, facial recognition contains blind spots. And without datasets like MS Celeb, the powerful yet innaccurate facial recognition services like Microsoft's Azure Cognitive Service also would not be able to see at all.
+Until now, that data has been freely harvested from the Internet and packaged in training sets like MS Celeb, which are overwhelmingly [white](https://www.nytimes.com/2018/02/09/technology/facial-recognition-race-artificial-intelligence.html) and [male](https://gendershades.org). Without balanced data, facial recognition contains blind spots. And without datasets like MS Celeb, the powerful yet inaccurate facial recognition services like Microsoft's Azure Cognitive Service also would not be able to see at all.

-Microsoft didn't only create MS Celeb for other researchers to use, they also used it internally. In a publicly available 2017 Microsoft Research project called "([One-shot Face Recognition by Promoting Underrepresented Classes](https://www.microsoft.com/en-us/research/publication/one-shot-face-recognition-promoting-underrepresented-classes/))", Microsoft leveraged the MS Celeb dataset to analyse their algorithms and advertise the results. Interestingly, the Microsoft's [corporate version](https://www.microsoft.com/en-us/research/publication/one-shot-face-recognition-promoting-underrepresented-classes/) does not mention they used the MS Celeb datset, but the [open-acess version](https://www.semanticscholar.org/paper/One-shot-Face-Recognition-by-Promoting-Classes-Guo/6cacda04a541d251e8221d70ac61fda88fb61a70) of the paper published on arxiv.org that same year explicity mentions that Microsoft Research tested their algorithms "on the MS-Celeb-1M low-shot learning benchmark task."
+Microsoft didn't only create MS Celeb for other researchers to use; they also used it internally.
In a publicly available 2017 Microsoft Research project called [One-shot Face Recognition by Promoting Underrepresented Classes](https://www.microsoft.com/en-us/research/publication/one-shot-face-recognition-promoting-underrepresented-classes/), Microsoft leveraged the MS Celeb dataset to analyze their algorithms and advertise the results. Interestingly, Microsoft's [corporate version](https://www.microsoft.com/en-us/research/publication/one-shot-face-recognition-promoting-underrepresented-classes/) of the paper does not mention they used the MS Celeb dataset, but the [open-access version](https://www.semanticscholar.org/paper/One-shot-Face-Recognition-by-Promoting-Classes-Guo/6cacda04a541d251e8221d70ac61fda88fb61a70) published on arxiv.org explicitly mentions that Microsoft Research tested their algorithms "on the MS-Celeb-1M low-shot learning benchmark task."

-We suggest that if Microsoft Research wants biometric data for surveillance research and development, they should start with own researcher's biometric data instead of scraping the Internet for journalists, artists, writers, and academics.
+We suggest that if Microsoft Research wants to make biometric data publicly available for surveillance research and development, they should start with releasing their researchers' own biometric data instead of scraping the Internet for journalists, artists, writers, actors, athletes, musicians, and academics.

{% include 'dashboard.html' %}

@@ -93,7 +85,5 @@ We suggest that if Microsoft Research wants biometric data for surveillance rese

### Footnotes

-[^brad_smith]: Brad Smith cite
[^msceleb_orig]: MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition
-[^madhu_ft]: Microsoft worked with Chinese military university on artificial intelligence
-[^disentangled]: "Exploring Disentangled Feature Representation Beyond Face Identification"
\ No newline at end of file
+[^madhu_ft]: Murgia, Madhumita. Microsoft worked with Chinese military university on artificial intelligence. Financial Times. April 10, 2019.
\ No newline at end of file
diff --git a/site/public/about/assets/LICENSE/index.html b/site/public/about/assets/LICENSE/index.html
index 0d3a7878..66d8b3ac 100644
--- a/site/public/about/assets/LICENSE/index.html
+++ b/site/public/about/assets/LICENSE/index.html
@@ -40,17 +40,17 @@
diff --git a/site/public/about/attribution/index.html b/site/public/about/attribution/index.html
index d3d38d3c..7b09e5b4 100644
--- a/site/public/about/attribution/index.html
+++ b/site/public/about/attribution/index.html
@@ -60,17 +60,17 @@ To Adapt: To modify, transform and build upon the database

diff --git a/site/public/about/index.html b/site/public/about/index.html index b0cb3436..48d1bb1c 100644 --- a/site/public/about/index.html +++ b/site/public/about/index.html @@ -50,9 +50,9 @@

asdf.us

MegaPixels is an art and research project first launched in 2017 for an installation at Tactical Technology Collective's GlassRoom about facial recognition datasets. In 2018 it was extended to cover pedestrian analysis datasets for a commission by Elevate Arts festival in Austria. Since then MegaPixels has evolved into a large-scale interrogation of hundreds of publicly-available face and person analysis datasets.

-

MegaPixels aims to provide a critical perspective on machine learning image datsets, one that might otherwise escape academia and industry funded artificial intelligence think tanks that are often supported by the same technology companies who have created many of the datasets presented on this site.

-

MegaPixels is an independent project, designed as a public resource for educators, students, journalists, and researchers. Each dataset presented on this site undergoes a thorough review of its images, intent, and funding sources. Though the goals are similar to publishing a public academic paper, MegaPixels is a website-first research project.

-

One of the main focuses of the dataset investigations is uncovering where funding originated. Because of our empahasis on other researchers' funding sources, it is important that we are transparent about our own. This site and the past year of reserach have been primarily funded by a privacy art grant from Mozilla in 2018. The original MegaPixels installation in 2017 was built as a commission for and with support from Tactical Technology Collective and Mozilla. Continued development in 2019 is partially supported by a 1-year Reseacher-in-Residence grant from Karlsruhe HfG and lecture and workshop fees.

+

MegaPixels aims to provide a critical perspective on machine learning image datasets, one that might otherwise escape academia and industry-funded artificial intelligence think tanks, which are often supported by several of the same technology companies that created the datasets presented on this site.

+

MegaPixels is an independent project, designed as a public resource for educators, students, journalists, and researchers. Each dataset presented on this site undergoes a thorough review of its images, intent, and funding sources. Though the goals are similar to publishing an academic paper, MegaPixels is a website-first research project, with an academic paper to follow.

+

One of the main focuses of the dataset investigations presented on this site is to uncover where funding originated. Because of our emphasis on other researchers' funding sources, it is important that we are transparent about our own. This site and the past year of research have been primarily funded by a privacy art grant from Mozilla in 2018. The original MegaPixels installation in 2017 was built as a commission for and with support from Tactical Technology Collective and Mozilla. The research into pedestrian analysis datasets was funded by a commission from Elevate Arts, and continued development in 2019 is supported in part by a 1-year Researcher-in-Residence grant from Karlsruhe HfG and lecture and workshop fees.

Team
  • Adam Harvey: Concept, research and analysis, design, computer vision
  • @@ -73,13 +73,7 @@ You are free:
  • ThreeJS for 3D visualizations
  • PDFMiner.Six and Pandas for research paper data analysis
-

Please direct questions, comments, or feedback to mastodon.social/@adamhrv

-

Funding Partners

-

The MegaPixels website, research, and development is made possible with support form Mozilla, our primary funding partner.

-

[ add logos ]

-

Additional support is provided by the European ARTificial Intelligence Network (AI LAB) at the Ars Electronica Center and a 1-year research-in-residence grant from Karlsruhe HfG.

-

[ add logos ]

-
Attribution
+
Attribution

If you use MegaPixels or any data derived from it for your work, please cite our original work as follows:

 @online{megapixels,
@@ -87,23 +81,25 @@ You are free:
  title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
  year = 2019,
  url = {https://megapixels.cc/},
- urldate = {2019-04-20}
+ urldate = {2019-04-18}
 }
-
+
Contact
+

Please direct questions, comments, or feedback to mastodon.social/@adamhrv

+ diff --git a/site/public/about/legal/index.html b/site/public/about/legal/index.html index 9eb5dd5a..ce10014a 100644 --- a/site/public/about/legal/index.html +++ b/site/public/about/legal/index.html @@ -90,17 +90,17 @@ To Adapt: To modify, transform and build upon the database

diff --git a/site/public/about/press/index.html b/site/public/about/press/index.html index 7b0a3e87..70caf03c 100644 --- a/site/public/about/press/index.html +++ b/site/public/about/press/index.html @@ -41,17 +41,17 @@ diff --git a/site/public/datasets/50_people_one_question/index.html b/site/public/datasets/50_people_one_question/index.html index dc7919f7..76d5b92f 100644 --- a/site/public/datasets/50_people_one_question/index.html +++ b/site/public/datasets/50_people_one_question/index.html @@ -88,7 +88,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.
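For illustration, the geocoding step described above could look roughly like the sketch below. It assumes institution names have already been extracted from the papers, and it uses geopy's Nominatim geocoder as a stand-in; the project's actual tooling beyond PDFMiner.Six and Pandas is not specified here.

```python
# Illustrative sketch: geocode institution names extracted from paper front matter.
# geopy/Nominatim is an assumption; the real MegaPixels pipeline may use different tooling.
from geopy.geocoders import Nominatim

geolocator = Nominatim(user_agent="dataset-citation-geocoder")

institutions = ["National University of Defense Technology", "Microsoft Research Asia"]
for name in institutions:
    location = geolocator.geocode(name)
    if location is not None:
        print(name, location.latitude, location.longitude)
```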

@@ -96,17 +96,17 @@ diff --git a/site/public/datasets/afad/index.html b/site/public/datasets/afad/index.html index f2b0a5ba..a3ff00cf 100644 --- a/site/public/datasets/afad/index.html +++ b/site/public/datasets/afad/index.html @@ -90,7 +90,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -109,17 +109,17 @@ Motivation

diff --git a/site/public/datasets/brainwash/index.html b/site/public/datasets/brainwash/index.html index b17617a6..cf1f5e5e 100644 --- a/site/public/datasets/brainwash/index.html +++ b/site/public/datasets/brainwash/index.html @@ -99,7 +99,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -137,17 +137,17 @@ diff --git a/site/public/datasets/caltech_10k/index.html b/site/public/datasets/caltech_10k/index.html index 04d63ee3..e86c5ca3 100644 --- a/site/public/datasets/caltech_10k/index.html +++ b/site/public/datasets/caltech_10k/index.html @@ -96,7 +96,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -106,17 +106,17 @@ diff --git a/site/public/datasets/celeba/index.html b/site/public/datasets/celeba/index.html index c72f3798..0236b91c 100644 --- a/site/public/datasets/celeba/index.html +++ b/site/public/datasets/celeba/index.html @@ -94,7 +94,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -108,17 +108,17 @@ diff --git a/site/public/datasets/cofw/index.html b/site/public/datasets/cofw/index.html index eef8cf5e..b0e73dac 100644 --- a/site/public/datasets/cofw/index.html +++ b/site/public/datasets/cofw/index.html @@ -87,7 +87,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -138,7 +138,7 @@ To increase the number of training images, and since COFW has the exact same la

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -161,17 +161,17 @@ To increase the number of training images, and since COFW has the exact same la diff --git a/site/public/datasets/duke_mtmc/index.html b/site/public/datasets/duke_mtmc/index.html index 14e6bee0..90c131b8 100644 --- a/site/public/datasets/duke_mtmc/index.html +++ b/site/public/datasets/duke_mtmc/index.html @@ -246,7 +246,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -369,17 +369,17 @@ diff --git a/site/public/datasets/feret/index.html b/site/public/datasets/feret/index.html index 387826b0..09abaee2 100644 --- a/site/public/datasets/feret/index.html +++ b/site/public/datasets/feret/index.html @@ -90,7 +90,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -119,17 +119,17 @@ diff --git a/site/public/datasets/hrt_transgender/index.html b/site/public/datasets/hrt_transgender/index.html index 6b9ae7be..4e566a4a 100644 --- a/site/public/datasets/hrt_transgender/index.html +++ b/site/public/datasets/hrt_transgender/index.html @@ -49,17 +49,17 @@ diff --git a/site/public/datasets/index.html b/site/public/datasets/index.html new file mode 100644 index 00000000..6e43e73f --- /dev/null +++ b/site/public/datasets/index.html @@ -0,0 +1,147 @@ + + + + MegaPixels + + + + + + + + + + + +
+ + +
MegaPixels
+ +
+ +
+
+ + +
+

Face Recognition Datasets

+

Explore publicly available facial recognition datasets feeding into research and development of biometric surveillance technologies at the largest technology companies and defense contractors in the world.

+
+
+ +
+ +
+ + +
+ + +
+ + + + + \ No newline at end of file diff --git a/site/public/datasets/lfpw/index.html b/site/public/datasets/lfpw/index.html index 45de2599..1238c8d3 100644 --- a/site/public/datasets/lfpw/index.html +++ b/site/public/datasets/lfpw/index.html @@ -83,7 +83,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -98,17 +98,17 @@ diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html index ca17b1cd..68021e93 100644 --- a/site/public/datasets/lfw/index.html +++ b/site/public/datasets/lfw/index.html @@ -97,7 +97,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -148,17 +148,17 @@ diff --git a/site/public/datasets/market_1501/index.html b/site/public/datasets/market_1501/index.html index 7c545335..a72cb6cf 100644 --- a/site/public/datasets/market_1501/index.html +++ b/site/public/datasets/market_1501/index.html @@ -91,7 +91,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -114,17 +114,17 @@ organization={Springer} diff --git a/site/public/datasets/msceleb/index.html b/site/public/datasets/msceleb/index.html index 345592d3..a00b3527 100644 --- a/site/public/datasets/msceleb/index.html +++ b/site/public/datasets/msceleb/index.html @@ -50,7 +50,7 @@
Website
msceleb.org

Microsoft Celeb (MS Celeb) is a dataset of 10 million face images scraped from the Internet and used for research and development of large-scale biometric recognition systems. According to Microsoft Research, which created and published the dataset in 2016, MS Celeb is the largest publicly available face recognition dataset in the world, containing over 10 million images of nearly 100,000 individuals. Microsoft's goal in building this dataset was to distribute an initial training dataset of 100,000 individuals' images and use it to accelerate research into recognizing a target list of one million individuals from their face images "using all the possibly collected face images of this individual on the web as training data". 1

-

These one million people, defined by Micrsoft Research as "celebrities", are often merely people who must maintain an online presence for their professional lives. Microsoft's list of 1 million people is an expansive exploitation of the current reality that for many people including academics, policy makers, writers, artists, and especially journalists maintaining an online presence is mandatory and should not allow Microsoft or anyone else to use their biometrics for reserach and development of surveillance technology. Many of names in target list even include people critical of the very technology Microsoft is using their name and biometric information to build. The list includes digital rights activists like Jillian York and [add more]; artists critical of surveillance including Trevor Paglen, Hito Steryl, Jill Magid, and Aram Bartholl; Intercept founders Laura Poitras, Jeremy Scahill, and Glen Greenwald; Data and Society founder danah boyd; and even Julie Brill the former FTC commissioner responsible for protecting consumer’s privacy to name a few.

+

These one million people, defined by Microsoft Research as "celebrities", are often merely people who must maintain an online presence for their professional lives. Microsoft's list of 1 million people exploits the current reality that for many people, including academics, policy makers, writers, artists, and especially journalists, maintaining an online presence is mandatory; that necessity should not allow Microsoft or anyone else to use their biometrics for research and development of surveillance technology. Many of the names in the target list are people critical of the very technology that Microsoft is using their names and biometric information to build. The list includes digital rights activists like Jillian York and [add more]; artists critical of surveillance including Trevor Paglen, Hito Steyerl, Jill Magid, and Aram Bartholl; Intercept founders Laura Poitras, Jeremy Scahill, and Glenn Greenwald; Data and Society founder danah boyd; and even Julie Brill, the former FTC commissioner responsible for protecting consumers’ privacy, to name a few.

Microsoft's 1 Million Target List

Below is a list of names that were included in the list of 1 million individuals, curated here to illustrate Microsoft's expansive and exploitative practice of scraping the Internet for biometric training data. The entire name file can be downloaded from msceleb.org. Email msceleb@microsoft.com to have your name removed. Names appearing with * indicate that Microsoft also distributed images.
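As a rough illustration of how someone might check whether they appear in the distributed name file, the sketch below scans a tab-separated list for a given name. The file name and the ID/name column layout are assumptions, not the official format; adjust them to match the file actually downloaded from msceleb.org.

```python
# Hypothetical sketch: search the downloaded MS Celeb name list for your own name.
# "msceleb_name_list.tsv" and its column order are assumptions, not the documented layout.
import csv

query = "jillian york"
with open("msceleb_name_list.tsv", encoding="utf-8") as f:
    for row in csv.reader(f, delimiter="\t"):
        if len(row) >= 2 and query in row[1].lower():
            print("Possible match:", row)
```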

@@ -166,7 +166,7 @@

Earlier in 2019, Microsoft President Brad Smith called for government regulation of face recognition, citing the potential for misuse, a rare admission that Microsoft's surveillance-driven business model had lost its bearing. More recently Smith also announced that Microsoft would seemingly take a stand against potential misuse and decided not to sell face recognition to an unnamed United States law enforcement agency, citing that their technology was not accurate enough to be used on minorities because it was trained mostly on white male faces.

What the decision to block the sale announces is not so much that Microsoft had upgraded their ethics, but that Microsoft publicly acknowledged it can't sell a data-driven product without data. In other words, Microsoft can't sell face recognition for faces they can't train on.

Until now, that data has been freely harvested from the Internet and packaged in training sets like MS Celeb, which are overwhelmingly white and male. Without balanced data, facial recognition contains blind spots. And without datasets like MS Celeb, the powerful yet inaccurate facial recognition services like Microsoft's Azure Cognitive Service also would not be able to see at all.

-

Microsoft didn't only create MS Celeb for other researchers to use, they also used it internally. In a publicly available 2017 Microsoft Research project called "(One-shot Face Recognition by Promoting Underrepresented Classes)", Microsoft leveraged the MS Celeb dataset to analyze their algorithms and advertise the results. Interestingly, Microsoft's corporate version of the paper does not mention they used the MS Celeb datset, but the open-access version published on arxiv.org explicitly mentions that Microsoft Research tested their algorithms "on the MS-Celeb-1M low-shot learning benchmark task."

+

Microsoft didn't only create MS Celeb for other researchers to use; they also used it internally. In a publicly available 2017 Microsoft Research project called One-shot Face Recognition by Promoting Underrepresented Classes, Microsoft leveraged the MS Celeb dataset to analyze their algorithms and advertise the results. Interestingly, Microsoft's corporate version of the paper does not mention they used the MS Celeb dataset, but the open-access version published on arxiv.org explicitly mentions that Microsoft Research tested their algorithms "on the MS-Celeb-1M low-shot learning benchmark task."

We suggest that if Microsoft Research wants to make biometric data publicly available for surveillance research and development, they should start with releasing their researchers' own biometric data instead of scraping the Internet for journalists, artists, writers, actors, athletes, musicians, and academics.

Who used Microsoft Celeb?

@@ -215,7 +215,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -234,17 +234,17 @@ diff --git a/site/public/datasets/oxford_town_centre/index.html b/site/public/datasets/oxford_town_centre/index.html index 4fbcaccb..fabcae6b 100644 --- a/site/public/datasets/oxford_town_centre/index.html +++ b/site/public/datasets/oxford_town_centre/index.html @@ -98,7 +98,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -138,17 +138,17 @@ diff --git a/site/public/datasets/pipa/index.html b/site/public/datasets/pipa/index.html index 6c920b46..297f4d45 100644 --- a/site/public/datasets/pipa/index.html +++ b/site/public/datasets/pipa/index.html @@ -94,7 +94,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -102,17 +102,17 @@ diff --git a/site/public/datasets/pubfig/index.html b/site/public/datasets/pubfig/index.html index e81e12bc..5feed748 100644 --- a/site/public/datasets/pubfig/index.html +++ b/site/public/datasets/pubfig/index.html @@ -91,7 +91,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -99,17 +99,17 @@ diff --git a/site/public/datasets/uccs/index.html b/site/public/datasets/uccs/index.html index 23aeeff1..3296cabc 100644 --- a/site/public/datasets/uccs/index.html +++ b/site/public/datasets/uccs/index.html @@ -104,7 +104,7 @@ Their setup made it impossible for students to know they were being photographed

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -258,17 +258,17 @@ Their setup made it impossible for students to know they were being photographed diff --git a/site/public/datasets/vgg_face2/index.html b/site/public/datasets/vgg_face2/index.html index a9d318f1..5f314d9e 100644 --- a/site/public/datasets/vgg_face2/index.html +++ b/site/public/datasets/vgg_face2/index.html @@ -96,7 +96,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -124,17 +124,17 @@ diff --git a/site/public/datasets/viper/index.html b/site/public/datasets/viper/index.html index bc4ddd3d..4d2abbe1 100644 --- a/site/public/datasets/viper/index.html +++ b/site/public/datasets/viper/index.html @@ -96,7 +96,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -104,17 +104,17 @@ diff --git a/site/public/datasets/youtube_celebrities/index.html b/site/public/datasets/youtube_celebrities/index.html index 69b3a02e..d0a7a172 100644 --- a/site/public/datasets/youtube_celebrities/index.html +++ b/site/public/datasets/youtube_celebrities/index.html @@ -75,7 +75,7 @@

Dataset Citations

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. If you use our data, please cite our work.

@@ -95,17 +95,17 @@ the views of our sponsors. diff --git a/site/public/info/index.html b/site/public/info/index.html index 749c29ba..7e7ecf80 100644 --- a/site/public/info/index.html +++ b/site/public/info/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/research/00_introduction/index.html b/site/public/research/00_introduction/index.html index 353e3270..ef8a5316 100644 --- a/site/public/research/00_introduction/index.html +++ b/site/public/research/00_introduction/index.html @@ -42,10 +42,15 @@
Posted
Dec. 15
Author
Adam Harvey

Facial recognition is a scam.

+

It's an extractive and damaging industry that's built on the biometric backbone of the Internet.

During the last 20 years, commercial, academic, and governmental agencies have promoted the false dream of a future with face recognition. This essay debunks the popular myth that such a thing ever existed.

There is no such thing as face recognition. For the last 20 years, government agencies, commercial organizations, and academic institutions have played the public for a fool, selling a roadmap of the future that simply does not exist. Facial recognition, as it is currently defined, promoted, and sold to the public, government, and commercial sector, is a scam.

Committed to developing robust solutions with superhuman accuracy, the industry has repeatedly undermined itself by never actually developing anything close to "face recognition".

There is only biased feature vector clustering and probabilistic thresholding.
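To make that claim concrete, here is a minimal sketch of what a "recognition" decision reduces to. The embedding values stand in for the output of any face-embedding model, and the 0.6 threshold is an arbitrary illustrative choice rather than a value from any particular vendor's system.

```python
# Minimal sketch: "face recognition" as feature-vector comparison plus a tuned threshold.
import numpy as np

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

def same_person(probe: np.ndarray, gallery: np.ndarray, threshold: float = 0.6) -> bool:
    # A "match" is nothing more than a similarity score crossing a threshold;
    # both the embedding space and the threshold inherit the biases of the training data.
    return cosine_similarity(probe, gallery) >= threshold

# Stand-in 128-dimensional embeddings in place of a real model's output.
rng = np.random.default_rng(0)
print(same_person(rng.normal(size=128), rng.normal(size=128)))
```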

+

If you don't have data, you don't have a product.

+

Yesterday's decision by Brad Smith, President of Microsoft, not to sell facial recognition to a US law enforcement agency is not an about-face by Microsoft to become more humane; it's simply a perfect illustration of the value of training data. Without data, you don't have a product to sell. Microsoft realized that it doesn't have enough training data to sell such a product.

+

Use Your Own Biometrics First

+

If researchers want faces, they should take selfies and create their own dataset. If researchers want images of families to build surveillance software, they should use and distribute their own family portraits.

Motivation

Ever since government agencies began developing face recognition in the early 1960's, datasets of face images have always been central to developing and validating face recognition technologies. Today, these datasets no longer originate in labs, but instead from family photo albums posted on photo sharing sites, surveillance camera footage from college campuses, search engine queries for celebrities, cafe livestreams, or videos on YouTube.

During the last year, hundreds of these facial analysis datasets created "in the wild" have been collected to understand how they contribute to a global supply chain of biometric data that is powering the global facial recognition industry.

@@ -83,17 +88,17 @@ diff --git a/site/public/research/01_from_1_to_100_pixels/index.html b/site/public/research/01_from_1_to_100_pixels/index.html index 9426ef0f..fe49e998 100644 --- a/site/public/research/01_from_1_to_100_pixels/index.html +++ b/site/public/research/01_from_1_to_100_pixels/index.html @@ -121,17 +121,17 @@ relying on FaceID and TouchID to protect their information agree to a

diff --git a/site/public/research/02_what_computers_can_see/index.html b/site/public/research/02_what_computers_can_see/index.html index e870ea4b..aac0b723 100644 --- a/site/public/research/02_what_computers_can_see/index.html +++ b/site/public/research/02_what_computers_can_see/index.html @@ -296,17 +296,17 @@ Head top

diff --git a/site/public/research/index.html b/site/public/research/index.html index 1be8203f..0386fa99 100644 --- a/site/public/research/index.html +++ b/site/public/research/index.html @@ -31,17 +31,17 @@ diff --git a/site/public/test/chart/index.html b/site/public/test/chart/index.html index e882ecc5..05081cf5 100644 --- a/site/public/test/chart/index.html +++ b/site/public/test/chart/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/test/citations/index.html b/site/public/test/citations/index.html index a8af41df..36021752 100644 --- a/site/public/test/citations/index.html +++ b/site/public/test/citations/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/test/csv/index.html b/site/public/test/csv/index.html index 2c2242b4..301ed718 100644 --- a/site/public/test/csv/index.html +++ b/site/public/test/csv/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/test/datasets/index.html b/site/public/test/datasets/index.html index bf08418f..58555895 100644 --- a/site/public/test/datasets/index.html +++ b/site/public/test/datasets/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/test/face_search/index.html b/site/public/test/face_search/index.html index 75bb907b..e2db70df 100644 --- a/site/public/test/face_search/index.html +++ b/site/public/test/face_search/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/test/gallery/index.html b/site/public/test/gallery/index.html index 8958f369..869c3aaa 100644 --- a/site/public/test/gallery/index.html +++ b/site/public/test/gallery/index.html @@ -50,17 +50,17 @@ diff --git a/site/public/test/index.html b/site/public/test/index.html index e660bb2d..9c15d431 100644 --- a/site/public/test/index.html +++ b/site/public/test/index.html @@ -43,17 +43,17 @@ diff --git a/site/public/test/map/index.html b/site/public/test/map/index.html index 21229ec1..ba2756ae 100644 --- a/site/public/test/map/index.html +++ b/site/public/test/map/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/test/name_search/index.html b/site/public/test/name_search/index.html index b0bdb86f..c956ff0b 100644 --- a/site/public/test/name_search/index.html +++ b/site/public/test/name_search/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/test/pie_chart/index.html b/site/public/test/pie_chart/index.html index 98a89ff4..2e3ba39c 100644 --- a/site/public/test/pie_chart/index.html +++ b/site/public/test/pie_chart/index.html @@ -32,17 +32,17 @@ -- cgit v1.2.3-70-g09d2