From b73e233acec5ad6c3aca7475288482f366f7a31f Mon Sep 17 00:00:00 2001
From: adamhrv
Date: Fri, 5 Apr 2019 13:17:05 +0200
Subject: never say final, update uccs

---
 site/public/datasets/feret/index.html | 38 ++++++++++++++++++++++++++---------
 1 file changed, 29 insertions(+), 9 deletions(-)

diff --git a/site/public/datasets/feret/index.html b/site/public/datasets/feret/index.html
index 45510f64..5cd29c4c 100644
--- a/site/public/datasets/feret/index.html
+++ b/site/public/datasets/feret/index.html
@@ -26,13 +26,34 @@
-

FERET

-
Years
1993-1996
Images
14,126
Identities
1,199
Origin
Fairfax, VA

Facial Recognition Technology (FERET) is a program to develop, test, and evaluate face recognition algorithms

-

The goal of the FERET program was to develop automatic face recognition capabilities that could be employed to assist security, intelligence, and law enforcement personnel in the performance of their duties.

+

Funding

The FERET program is sponsored by the U.S. Department of Defense’s Counterdrug Technology Development Program Office. The U.S. Army Research Laboratory (ARL) is the technical agent for the FERET program. ARL designed, administered, and scored the FERET tests. George Mason University collected, processed, and maintained the FERET database. Inquiries regarding the FERET database or test should be directed to P. Jonathon Phillips.

@@ -50,11 +71,10 @@
MegaPixels ©2017-19 Adam R. Harvey /

From 57fba037d519e45488599288f7753cb7a3cd32aa Mon Sep 17 00:00:00 2001
From: adamhrv
Date: Fri, 12 Apr 2019 09:09:13 +0200
Subject: merging

---
 site/assets/css/css.css | 43 ++- site/content/pages/datasets/brainwash/index.md | 1 + site/content/pages/datasets/duke_mtmc/index.md | 3 + .../pages/datasets/oxford_town_centre/index.md | 11 +- site/datasets/verified/50_people_one_question.csv | 2 + site/datasets/verified/adience.csv | 2 + site/datasets/verified/afad.csv | 2 + site/datasets/verified/afw.csv | 2 + site/datasets/verified/agedb.csv | 2 + site/datasets/verified/alert_airport.csv | 2 + site/datasets/verified/appa_real.csv | 2 + site/datasets/verified/awe_ears.csv | 2 + site/datasets/verified/bbc_pose.csv | 2 + site/datasets/verified/bpad.csv | 2 + site/datasets/verified/brainwash.csv | 5 + site/datasets/verified/caltech_10k_web_faces.csv | 2 + site/datasets/verified/caltech_crp.csv | 2 + site/datasets/verified/casia_webface.csv | 2 + site/datasets/verified/celeba.csv | 2 + site/datasets/verified/coco.csv | 2 + site/datasets/verified/cofw.csv | 2 + site/datasets/verified/cuhk_campus_03.csv | 2 + site/datasets/verified/cuhk_train_station.csv | 2 + site/datasets/verified/duke_mtmc.csv | 181 ++++++++++++ site/datasets/verified/erce.csv | 2 + site/datasets/verified/expw.csv | 2 + site/datasets/verified/face_scrub.csv | 2 + site/datasets/verified/face_tracer.csv | 2 + site/datasets/verified/facebook_100.csv | 2 + site/datasets/verified/families_in_the_wild.csv | 2 + site/datasets/verified/fddb.csv | 2 + site/datasets/verified/feret.csv | 2 + site/datasets/verified/fiw_300.csv | 2 + site/datasets/verified/frgc.csv | 2 + site/datasets/verified/gallagher.csv | 2 + site/datasets/verified/geofaces.csv | 2 + site/datasets/verified/gfw.csv | 2 + site/datasets/verified/helen.csv | 2 + site/datasets/verified/hipsterwars.csv | 2 + site/datasets/verified/hrt_transgender.csv | 6 + site/datasets/verified/ibm_dif.csv | 2 + site/datasets/verified/ifad.csv | 2 + site/datasets/verified/ifdb.csv | 2 + site/datasets/verified/ijb_a.csv | 2 + site/datasets/verified/ijb_b.csv | 2 + site/datasets/verified/ijb_c.csv | 6 + site/datasets/verified/ilids_mcts_vid.csv | 2 + site/datasets/verified/images_of_groups.csv | 2 + site/datasets/verified/imdb_face.csv | 2 + site/datasets/verified/imdb_wiki.csv | 2 + site/datasets/verified/imfdb.csv | 2 + site/datasets/verified/kin_face.csv | 2 + site/datasets/verified/lag.csv | 2 + site/datasets/verified/laofiw.csv | 2 + site/datasets/verified/lfpw.csv | 2 + site/datasets/verified/lfw.csv | 2 + site/datasets/verified/market_1501.csv | 177 ++++++ site/datasets/verified/mars.csv | 2 + site/datasets/verified/megaage.csv | 2 + site/datasets/verified/megaface.csv | 4 + site/datasets/verified/mifs.csv | 2 + site/datasets/verified/miw.csv | 2 + site/datasets/verified/morph.csv | 2 + site/datasets/verified/morph_nc.csv | 2 + site/datasets/verified/mot.csv | 2 + site/datasets/verified/msceleb.csv | 127 +++++++++ site/datasets/verified/mug_faces.csv | 2 + site/datasets/verified/names_and_faces.csv | 2 + site/datasets/verified/orl.csv | 2 + site/datasets/verified/oxford_town_centre.csv | 114 ++++++++ site/datasets/verified/pa_100k.csv | 2 + site/datasets/verified/penn_fudan.csv | 2 + site/datasets/verified/peta.csv | 2 + site/datasets/verified/pilot_parliament.csv | 2 + site/datasets/verified/pipa.csv | 2 + site/datasets/verified/pku_reid.csv | 2 + site/datasets/verified/prid.csv | 2 + site/datasets/verified/pubfig.csv | 2
+ site/datasets/verified/pubfig_83.csv | 2 + site/datasets/verified/social_relation.csv | 2 + site/datasets/verified/tisi.csv | 2 + site/datasets/verified/uccs.csv | 9 + site/datasets/verified/ucf_selfie.csv | 2 + site/datasets/verified/ufdd.csv | 2 + site/datasets/verified/umd_faces.csv | 2 + site/datasets/verified/urban_tribes.csv | 2 + site/datasets/verified/used.csv | 2 + site/datasets/verified/vadana.csv | 2 + site/datasets/verified/vgg_celebs_in_places.csv | 2 + site/datasets/verified/vgg_faces.csv | 2 + site/datasets/verified/vgg_faces2.csv | 2 + site/datasets/verified/viper.csv | 2 + site/datasets/verified/vmu.csv | 2 + site/datasets/verified/voc.csv | 2 + site/datasets/verified/who_goes_there.csv | 2 + site/datasets/verified/wider.csv | 2 + site/datasets/verified/wider_attribute.csv | 2 + site/datasets/verified/wider_face.csv | 2 + site/datasets/verified/wildtrack.csv | 2 + site/datasets/verified/yale_faces.csv | 2 + site/datasets/verified/yfcc_100m.csv | 2 + site/datasets/verified/youtube_celebrities.csv | 2 + site/datasets/verified/youtube_faces.csv | 2 + site/datasets/verified/youtube_makeup.csv | 2 + site/datasets/verified/youtube_poses.csv | 2 + site/includes/cite_our_work.html | 18 ++ site/public/about/faq/index.html | 59 ---- site/public/about/index.html | 93 ------- site/public/about/legal/index.html | 85 ------ site/public/about/press/index.html | 58 ---- .../datasets/50_people_one_question/index.html | 114 -------- site/public/datasets/afad/index.html | 127 --------- site/public/datasets/aflw/index.html | 53 ---- site/public/datasets/brainwash/index.html | 146 ---------- site/public/datasets/caltech_10k/index.html | 124 --------- site/public/datasets/celeba/index.html | 126 --------- site/public/datasets/cofw/index.html | 179 ------------ site/public/datasets/duke_mtmc/index.html | 144 ---------- site/public/datasets/facebook/index.html | 54 ---- site/public/datasets/feret/index.html | 87 ------ site/public/datasets/hrt_transgender/index.html | 67 ----- site/public/datasets/index.html | 145 ---------- site/public/datasets/lfpw/index.html | 116 -------- site/public/datasets/lfw/index.html | 166 ----------- site/public/datasets/market_1501/index.html | 132 --------- site/public/datasets/msceleb/index.html | 139 --------- site/public/datasets/oxford_town_centre/index.html | 146 ---------- site/public/datasets/pipa/index.html | 120 -------- site/public/datasets/pubfig/index.html | 117 -------- site/public/datasets/uccs/index.html | 255 ----------------- site/public/datasets/vgg_face2/index.html | 142 ---------- site/public/datasets/viper/index.html | 122 -------- .../public/datasets/youtube_celebrities/index.html | 113 -------- site/public/index.html | 39 --- site/public/info/index.html | 50 ---- site/public/research/00_introduction/index.html | 101 ------- .../research/01_from_1_to_100_pixels/index.html | 139 --------- .../research/02_what_computers_can_see/index.html | 310 --------------------- site/public/research/index.html | 49 ---- site/public/test/chart/index.html | 50 ---- site/public/test/citations/index.html | 50 ---- site/public/test/csv/index.html | 50 ---- site/public/test/datasets/index.html | 50 ---- site/public/test/face_search/index.html | 50 ---- site/public/test/gallery/index.html | 68 ----- site/public/test/index.html | 61 ---- site/public/test/map/index.html | 50 ---- site/public/test/name_search/index.html | 50 ---- site/public/test/pie_chart/index.html | 50 ---- 149 files changed, 872 insertions(+), 4463 deletions(-) create mode 100644 
site/datasets/verified/50_people_one_question.csv create mode 100644 site/datasets/verified/adience.csv create mode 100644 site/datasets/verified/afad.csv create mode 100644 site/datasets/verified/afw.csv create mode 100644 site/datasets/verified/agedb.csv create mode 100644 site/datasets/verified/alert_airport.csv create mode 100644 site/datasets/verified/appa_real.csv create mode 100644 site/datasets/verified/awe_ears.csv create mode 100644 site/datasets/verified/bbc_pose.csv create mode 100644 site/datasets/verified/bpad.csv create mode 100644 site/datasets/verified/brainwash.csv create mode 100644 site/datasets/verified/caltech_10k_web_faces.csv create mode 100644 site/datasets/verified/caltech_crp.csv create mode 100644 site/datasets/verified/casia_webface.csv create mode 100644 site/datasets/verified/celeba.csv create mode 100644 site/datasets/verified/coco.csv create mode 100644 site/datasets/verified/cofw.csv create mode 100644 site/datasets/verified/cuhk_campus_03.csv create mode 100644 site/datasets/verified/cuhk_train_station.csv create mode 100644 site/datasets/verified/duke_mtmc.csv create mode 100644 site/datasets/verified/erce.csv create mode 100644 site/datasets/verified/expw.csv create mode 100644 site/datasets/verified/face_scrub.csv create mode 100644 site/datasets/verified/face_tracer.csv create mode 100644 site/datasets/verified/facebook_100.csv create mode 100644 site/datasets/verified/families_in_the_wild.csv create mode 100644 site/datasets/verified/fddb.csv create mode 100644 site/datasets/verified/feret.csv create mode 100644 site/datasets/verified/fiw_300.csv create mode 100644 site/datasets/verified/frgc.csv create mode 100644 site/datasets/verified/gallagher.csv create mode 100644 site/datasets/verified/geofaces.csv create mode 100644 site/datasets/verified/gfw.csv create mode 100644 site/datasets/verified/helen.csv create mode 100644 site/datasets/verified/hipsterwars.csv create mode 100644 site/datasets/verified/hrt_transgender.csv create mode 100644 site/datasets/verified/ibm_dif.csv create mode 100644 site/datasets/verified/ifad.csv create mode 100644 site/datasets/verified/ifdb.csv create mode 100644 site/datasets/verified/ijb_a.csv create mode 100644 site/datasets/verified/ijb_b.csv create mode 100644 site/datasets/verified/ijb_c.csv create mode 100644 site/datasets/verified/ilids_mcts_vid.csv create mode 100644 site/datasets/verified/images_of_groups.csv create mode 100644 site/datasets/verified/imdb_face.csv create mode 100644 site/datasets/verified/imdb_wiki.csv create mode 100644 site/datasets/verified/imfdb.csv create mode 100644 site/datasets/verified/kin_face.csv create mode 100644 site/datasets/verified/lag.csv create mode 100644 site/datasets/verified/laofiw.csv create mode 100644 site/datasets/verified/lfpw.csv create mode 100644 site/datasets/verified/lfw.csv create mode 100644 site/datasets/verified/market_1501.csv create mode 100644 site/datasets/verified/mars.csv create mode 100644 site/datasets/verified/megaage.csv create mode 100644 site/datasets/verified/megaface.csv create mode 100644 site/datasets/verified/mifs.csv create mode 100644 site/datasets/verified/miw.csv create mode 100644 site/datasets/verified/morph.csv create mode 100644 site/datasets/verified/morph_nc.csv create mode 100644 site/datasets/verified/mot.csv create mode 100644 site/datasets/verified/msceleb.csv create mode 100644 site/datasets/verified/mug_faces.csv create mode 100644 site/datasets/verified/names_and_faces.csv create mode 100644 site/datasets/verified/orl.csv 
create mode 100644 site/datasets/verified/oxford_town_centre.csv create mode 100644 site/datasets/verified/pa_100k.csv create mode 100644 site/datasets/verified/penn_fudan.csv create mode 100644 site/datasets/verified/peta.csv create mode 100644 site/datasets/verified/pilot_parliament.csv create mode 100644 site/datasets/verified/pipa.csv create mode 100644 site/datasets/verified/pku_reid.csv create mode 100644 site/datasets/verified/prid.csv create mode 100644 site/datasets/verified/pubfig.csv create mode 100644 site/datasets/verified/pubfig_83.csv create mode 100644 site/datasets/verified/social_relation.csv create mode 100644 site/datasets/verified/tisi.csv create mode 100644 site/datasets/verified/uccs.csv create mode 100644 site/datasets/verified/ucf_selfie.csv create mode 100644 site/datasets/verified/ufdd.csv create mode 100644 site/datasets/verified/umd_faces.csv create mode 100644 site/datasets/verified/urban_tribes.csv create mode 100644 site/datasets/verified/used.csv create mode 100644 site/datasets/verified/vadana.csv create mode 100644 site/datasets/verified/vgg_celebs_in_places.csv create mode 100644 site/datasets/verified/vgg_faces.csv create mode 100644 site/datasets/verified/vgg_faces2.csv create mode 100644 site/datasets/verified/viper.csv create mode 100644 site/datasets/verified/vmu.csv create mode 100644 site/datasets/verified/voc.csv create mode 100644 site/datasets/verified/who_goes_there.csv create mode 100644 site/datasets/verified/wider.csv create mode 100644 site/datasets/verified/wider_attribute.csv create mode 100644 site/datasets/verified/wider_face.csv create mode 100644 site/datasets/verified/wildtrack.csv create mode 100644 site/datasets/verified/yale_faces.csv create mode 100644 site/datasets/verified/yfcc_100m.csv create mode 100644 site/datasets/verified/youtube_celebrities.csv create mode 100644 site/datasets/verified/youtube_faces.csv create mode 100644 site/datasets/verified/youtube_makeup.csv create mode 100644 site/datasets/verified/youtube_poses.csv create mode 100644 site/includes/cite_our_work.html delete mode 100644 site/public/about/faq/index.html delete mode 100644 site/public/about/index.html delete mode 100644 site/public/about/legal/index.html delete mode 100644 site/public/about/press/index.html delete mode 100644 site/public/datasets/50_people_one_question/index.html delete mode 100644 site/public/datasets/afad/index.html delete mode 100644 site/public/datasets/aflw/index.html delete mode 100644 site/public/datasets/brainwash/index.html delete mode 100644 site/public/datasets/caltech_10k/index.html delete mode 100644 site/public/datasets/celeba/index.html delete mode 100644 site/public/datasets/cofw/index.html delete mode 100644 site/public/datasets/duke_mtmc/index.html delete mode 100644 site/public/datasets/facebook/index.html delete mode 100644 site/public/datasets/feret/index.html delete mode 100644 site/public/datasets/hrt_transgender/index.html delete mode 100644 site/public/datasets/index.html delete mode 100644 site/public/datasets/lfpw/index.html delete mode 100644 site/public/datasets/lfw/index.html delete mode 100644 site/public/datasets/market_1501/index.html delete mode 100644 site/public/datasets/msceleb/index.html delete mode 100644 site/public/datasets/oxford_town_centre/index.html delete mode 100644 site/public/datasets/pipa/index.html delete mode 100644 site/public/datasets/pubfig/index.html delete mode 100644 site/public/datasets/uccs/index.html delete mode 100644 site/public/datasets/vgg_face2/index.html delete mode 
100644 site/public/datasets/viper/index.html delete mode 100644 site/public/datasets/youtube_celebrities/index.html delete mode 100644 site/public/index.html delete mode 100644 site/public/info/index.html delete mode 100644 site/public/research/00_introduction/index.html delete mode 100644 site/public/research/01_from_1_to_100_pixels/index.html delete mode 100644 site/public/research/02_what_computers_can_see/index.html delete mode 100644 site/public/research/index.html delete mode 100644 site/public/test/chart/index.html delete mode 100644 site/public/test/citations/index.html delete mode 100644 site/public/test/csv/index.html delete mode 100644 site/public/test/datasets/index.html delete mode 100644 site/public/test/face_search/index.html delete mode 100644 site/public/test/gallery/index.html delete mode 100644 site/public/test/index.html delete mode 100644 site/public/test/map/index.html delete mode 100644 site/public/test/name_search/index.html delete mode 100644 site/public/test/pie_chart/index.html

diff --git a/site/assets/css/css.css b/site/assets/css/css.css
index e5615c67..a61a1875 100644
--- a/site/assets/css/css.css
+++ b/site/assets/css/css.css
@@ -197,7 +197,7 @@ h3 {
 h4 {
   margin: 0 0 10px 0;
   padding: 0;
-  font-size: 11pt;
+  font-size: 18pt;
   font-weight: 400;
   transition: color 0.1s cubic-bezier(0,0,1,1);
   font-family: 'Roboto Mono', monospace;
@@ -366,6 +366,9 @@ pre {
   margin: 0 0 40px 0;
   border: 1px solid #666;
   border-radius: 2px;
+  padding: 10px;
+  display: block;
+  background: #333;
 }
 pre code {
   display: block;
@@ -422,8 +425,28 @@ blockquote {
   border-left: 2px solid #555;
 }
 
-/* footnotes */
+/* Buttons */
+.citation-opts {
+}
+.citation-opts li{
+  display: inline-block;
+  margin-right:10px;
+}
+.desktop .content a.btn{
+  display: inline-block;
+  background: #333;
+  padding: 6px 10px;
+  font-size: 10px;
+  border-radius: 6px;
+  font-weight: 400;
+  border:0px;
+}
+.desktop .content a.btn:hover{
+  background: #444;
+  border:0px;
+}
+/* footnotes */
 .footnotes hr {
   display: none;
 }
 
@@ -678,8 +701,8 @@ section.fullwidth .image {
   border: 0;
 }
 .dataset-list .dataset {
-  width: 220px;
-  height: 140px;
+  width: 300px;
+  height: 180px;
   padding: 10px;
   color: white;
   font-weight: 400;
@@ -878,6 +901,12 @@ ul.map-legend li.source:before {
 }
 
 /* about */
+.desktop a.ahrefmoz{
+  color:#DE5825;
+}
+.desktop a.ahrefmoz:hover{
+  color:#EF6D3B;
+}
 
 .content-about {
   color: #fff;
@@ -890,8 +919,8 @@ ul.map-legend li.source:before {
   font-weight: 300;
 }
 .content-about section:first-of-type > p:first-of-type {
-  font-size: 23px;
-  line-height: 39px;
+  font-size: 22px;
+  line-height: 40px;
 }
 .content-about .about-menu ul li {
   display: inline-block;
@@ -985,7 +1014,7 @@ ul.map-legend li.source:before {
   display: block;
 }
 .content-about .team-member h3{
-  font-size:18px;
+  font-size:20px;
 }
 
diff --git a/site/content/pages/datasets/brainwash/index.md b/site/content/pages/datasets/brainwash/index.md
index c2f5c1ae..6bb7f287 100644
--- a/site/content/pages/datasets/brainwash/index.md
+++ b/site/content/pages/datasets/brainwash/index.md
@@ -44,6 +44,7 @@ TODO
 - add ethics link to Stanford
 - add optout info
 
+{% include 'cite_our_work.html' %}
 
 ### Footnotes
 
diff --git a/site/content/pages/datasets/duke_mtmc/index.md b/site/content/pages/datasets/duke_mtmc/index.md
index ceed1416..8308eee7 100644
--- a/site/content/pages/datasets/duke_mtmc/index.md
+++ b/site/content/pages/datasets/duke_mtmc/index.md
@@ -44,6 +44,9 @@ The 8 cameras deployed on Duke's campus were specifically setup to capture stude
 
 The Duke MTMC dataset paper mentions 2,700 identities, but their ground truth file only lists annotations for 1,812
 
+{% include 'cite_our_work.html' %}
+
+
 ### Footnotes
 
 [^sensetime_qz]:
diff --git a/site/content/pages/datasets/oxford_town_centre/index.md b/site/content/pages/datasets/oxford_town_centre/index.md
index 349ab257..c26b81e7 100644
--- a/site/content/pages/datasets/oxford_town_centre/index.md
+++ b/site/content/pages/datasets/oxford_town_centre/index.md
@@ -48,16 +48,7 @@ As for the capture date, the text on the storefront display shows a sale happeni
 
 === end columns
 
-### Demo Videos Using Oxford Town Centre Dataset
-
-Several researchers have posted their demo videos using the Oxford Town Centre dataset on YouTube:
-
-- [Multi target tracking on Oxford Dataset](https://www.youtube.com/watch?v=nO-3EM9dEd4)
-- [Multi-pedestrian tracking (TownCentre dataset)]https://www.youtube.com/watch?v=nO-3EM9dEd4
-- [Multiple object tracking with kalman tracker and sort](https://www.youtube.com/watch?v=SKXk6uB8348)
-- [Multi target tracking on Oxford dataset](https://www.youtube.com/watch?v=RM_RdXH7pSY)
-- [towncentre](https://www.youtube.com/watch?v=ErLtfUAJA8U)
-- [VTD - towncenter.avi](https://www.youtube.com/watch?v=LwMOmqvhnoc)
+{% include 'cite_our_work.html' %}
 
 ### Footnotes
 
diff --git a/site/datasets/verified/50_people_one_question.csv b/site/datasets/verified/50_people_one_question.csv
new file mode 100644
index 00000000..ab3b8956
--- /dev/null
+++ b/site/datasets/verified/50_people_one_question.csv
@@ -0,0 +1,2 @@
+id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year
+0,,50 People One Question,50_people_one_question,0.0,0.0,,,,main,,Merging Pose Estimates Across Space and Time,2013
diff --git a/site/datasets/verified/adience.csv b/site/datasets/verified/adience.csv
new file mode 100644
index 00000000..deadc399
--- /dev/null
+++ b/site/datasets/verified/adience.csv
@@ -0,0 +1,2 @@
+id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year
+0,,Adience,adience,0.0,0.0,,,,main,,Age and Gender Estimation of Unfiltered Faces,2014
diff --git a/site/datasets/verified/afad.csv b/site/datasets/verified/afad.csv
new file mode 100644
index 00000000..b67ff97a
--- /dev/null
+++ b/site/datasets/verified/afad.csv
@@ -0,0 +1,2 @@
+id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year
+0,,AFAD,afad,0.0,0.0,,,,main,,Ordinal Regression with Multiple Output CNN for Age Estimation,2016
diff --git a/site/datasets/verified/afw.csv b/site/datasets/verified/afw.csv
new file mode 100644
index 00000000..b17652e3
--- /dev/null
+++ b/site/datasets/verified/afw.csv
@@ -0,0 +1,2 @@
+id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year
+0,,AFW,afw,0.0,0.0,,,,main,,"Face detection, pose estimation, and landmark localization in the wild",2012
diff --git a/site/datasets/verified/agedb.csv b/site/datasets/verified/agedb.csv
new file mode 100644
index 00000000..ad90a985
--- /dev/null
+++ b/site/datasets/verified/agedb.csv
@@ -0,0 +1,2 @@
+id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year
+0,,AgeDB,agedb,0.0,0.0,,,,main,,"AgeDB: The First Manually Collected, In-the-Wild Age Database",2017
diff --git a/site/datasets/verified/alert_airport.csv b/site/datasets/verified/alert_airport.csv
new file mode 100644
index 00000000..6fa30c1f
--- /dev/null
+++ b/site/datasets/verified/alert_airport.csv
@@ -0,0 +1,2
@@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,ALERT Airport,alert_airport,0.0,0.0,,,,main,,"A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets",2018 diff --git a/site/datasets/verified/appa_real.csv b/site/datasets/verified/appa_real.csv new file mode 100644 index 00000000..a877dd82 --- /dev/null +++ b/site/datasets/verified/appa_real.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,APPA-REAL,appa_real,0.0,0.0,,,,main,,Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database,2017 diff --git a/site/datasets/verified/awe_ears.csv b/site/datasets/verified/awe_ears.csv new file mode 100644 index 00000000..1959b962 --- /dev/null +++ b/site/datasets/verified/awe_ears.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,AWE Ears,awe_ears,0.0,0.0,,,,main,,Ear Recognition: More Than a Survey,2017 diff --git a/site/datasets/verified/bbc_pose.csv b/site/datasets/verified/bbc_pose.csv new file mode 100644 index 00000000..5926c85a --- /dev/null +++ b/site/datasets/verified/bbc_pose.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,BBC Pose,bbc_pose,0.0,0.0,,,,main,,Automatic and Efficient Human Pose Estimation for Sign Language Videos,2013 diff --git a/site/datasets/verified/bpad.csv b/site/datasets/verified/bpad.csv new file mode 100644 index 00000000..bdf32861 --- /dev/null +++ b/site/datasets/verified/bpad.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,BPAD,bpad,0.0,0.0,,,,main,,Describing people: A poselet-based approach to attribute classification,2011 diff --git a/site/datasets/verified/brainwash.csv b/site/datasets/verified/brainwash.csv new file mode 100644 index 00000000..628ca090 --- /dev/null +++ b/site/datasets/verified/brainwash.csv @@ -0,0 +1,5 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,Brainwash,brainwash,0.0,0.0,,,,main,,End-to-End People Detection in Crowded Scenes,2016 +1,China,Brainwash,brainwash,39.9922379,116.30393816,Peking University,edu,7e915bb8e4ada4f8d261bc855a4f587ea97764ca,citation,,People detection in crowded scenes via regional-based convolutional network,2016 +2,China,Brainwash,brainwash,28.2290209,112.99483204,"National University of Defense Technology, China",mil,591a4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b,citation,https://pdfs.semanticscholar.org/591a/4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b.pdf,A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering,2017 +3,China,Brainwash,brainwash,28.2290209,112.99483204,"National University of Defense Technology, China",mil,b02d31c640b0a31fb18c4f170d841d8e21ffb66c,citation,,Localized region context and object feature fusion for people head detection,2016 diff --git a/site/datasets/verified/caltech_10k_web_faces.csv b/site/datasets/verified/caltech_10k_web_faces.csv new file mode 100644 index 00000000..c86cce93 --- /dev/null +++ b/site/datasets/verified/caltech_10k_web_faces.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,Caltech 10K Web Faces,caltech_10k_web_faces,0.0,0.0,,,,main,,Pruning training sets for learning of object categories,2005 diff --git a/site/datasets/verified/caltech_crp.csv 
b/site/datasets/verified/caltech_crp.csv new file mode 100644 index 00000000..d858c8a7 --- /dev/null +++ b/site/datasets/verified/caltech_crp.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,Caltech CRP,caltech_crp,0.0,0.0,,,,main,,Fine-grained classification of pedestrians in video: Benchmark and state of the art,2015 diff --git a/site/datasets/verified/casia_webface.csv b/site/datasets/verified/casia_webface.csv new file mode 100644 index 00000000..fe39fac8 --- /dev/null +++ b/site/datasets/verified/casia_webface.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,CASIA Webface,casia_webface,0.0,0.0,,,,main,,Learning Face Representation from Scratch,2014 diff --git a/site/datasets/verified/celeba.csv b/site/datasets/verified/celeba.csv new file mode 100644 index 00000000..342dcbbf --- /dev/null +++ b/site/datasets/verified/celeba.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,CelebA,celeba,0.0,0.0,,,,main,,Deep Learning Face Attributes in the Wild,2015 diff --git a/site/datasets/verified/coco.csv b/site/datasets/verified/coco.csv new file mode 100644 index 00000000..0c19b8cf --- /dev/null +++ b/site/datasets/verified/coco.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,COCO,coco,0.0,0.0,,,,main,,Microsoft COCO: Common Objects in Context,2014 diff --git a/site/datasets/verified/cofw.csv b/site/datasets/verified/cofw.csv new file mode 100644 index 00000000..7bd9e598 --- /dev/null +++ b/site/datasets/verified/cofw.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,COFW,cofw,0.0,0.0,,,,main,,Robust Face Landmark Estimation under Occlusion,2013 diff --git a/site/datasets/verified/cuhk_campus_03.csv b/site/datasets/verified/cuhk_campus_03.csv new file mode 100644 index 00000000..cdfd115d --- /dev/null +++ b/site/datasets/verified/cuhk_campus_03.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,CUHK03 Campus,cuhk_campus_03,0.0,0.0,,,,main,,Human Reidentification with Transferred Metric Learning,2012 diff --git a/site/datasets/verified/cuhk_train_station.csv b/site/datasets/verified/cuhk_train_station.csv new file mode 100644 index 00000000..675e473b --- /dev/null +++ b/site/datasets/verified/cuhk_train_station.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,CUHK Train Station Dataset,cuhk_train_station,0.0,0.0,,,,main,,Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents,2012 diff --git a/site/datasets/verified/duke_mtmc.csv b/site/datasets/verified/duke_mtmc.csv new file mode 100644 index 00000000..929b84c1 --- /dev/null +++ b/site/datasets/verified/duke_mtmc.csv @@ -0,0 +1,181 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,Duke MTMC,duke_mtmc,0.0,0.0,,,,main,,"Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking",2016 +1,United States,Duke MTMC,duke_mtmc,35.9990522,-78.9290629,Duke University,edu,c9b98c98357a154bceb2287c427c5fa9c17b4a07,citation,https://arxiv.org/pdf/1803.05872.pdf,Virtual CNN Branching: Efficient Feature Ensemble for Person Re-Identification,2018 +2,United States,Duke 
MTMC,duke_mtmc,42.3614256,-71.0812092,Microsoft Research Asia,company,1e2f07f7231eef629c78cba4ada0c9be29d77254,citation,,Group Re-Identification: Leveraging and Integrating Multi-Grain Information,2018 +3,China,Duke MTMC,duke_mtmc,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,1e2f07f7231eef629c78cba4ada0c9be29d77254,citation,,Group Re-Identification: Leveraging and Integrating Multi-Grain Information,2018 +4,China,Duke MTMC,duke_mtmc,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,1e2f07f7231eef629c78cba4ada0c9be29d77254,citation,,Group Re-Identification: Leveraging and Integrating Multi-Grain Information,2018 +5,China,Duke MTMC,duke_mtmc,24.4399419,118.09301781,Xiamen University,edu,2788a2461ed0067e2f7aaa63c449a24a237ec341,citation,https://arxiv.org/pdf/1708.04896.pdf,Random Erasing Data Augmentation,2017 +6,United States,Duke MTMC,duke_mtmc,32.7768233,-117.0693407,"California State University, San Marcos",edu,9643dabbf1771d2d82ded2fde3baaa15a67f6e56,citation,,Unsupervised Joint Subspace and Dictionary Learning for Enhanced Cross-Domain Person Re-Identification,2018 +7,China,Duke MTMC,duke_mtmc,32.0565957,118.77408833,Nanjing University,edu,9643dabbf1771d2d82ded2fde3baaa15a67f6e56,citation,,Unsupervised Joint Subspace and Dictionary Learning for Enhanced Cross-Domain Person Re-Identification,2018 +8,United Kingdom,Duke MTMC,duke_mtmc,51.5247272,-0.03931035,Queen Mary University of London,edu,e323bbaef9ea9a6257b7464e4cc146d690d0d55b,citation,https://arxiv.org/pdf/1811.08400.pdf,Single-Label Multi-Class Image Classification by Deep Logistic Regression,2019 +9,China,Duke MTMC,duke_mtmc,28.2290209,112.99483204,"National University of Defense Technology, China",mil,59f357015054bab43fb8cbfd3f3dbf17b1d1f881,citation,https://pdfs.semanticscholar.org/59f3/57015054bab43fb8cbfd3f3dbf17b1d1f881.pdf,Unsupervised Multi-Object Detection for Video Surveillance Using Memory-Based Recurrent Attention Networks,2018 +10,United Kingdom,Duke MTMC,duke_mtmc,51.5231607,-0.1282037,University College London,edu,59f357015054bab43fb8cbfd3f3dbf17b1d1f881,citation,https://pdfs.semanticscholar.org/59f3/57015054bab43fb8cbfd3f3dbf17b1d1f881.pdf,Unsupervised Multi-Object Detection for Video Surveillance Using Memory-Based Recurrent Attention Networks,2018 +11,China,Duke MTMC,duke_mtmc,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,a0dfc588cd1bc35a06734a31fca81e7adc94b940,citation,https://arxiv.org/pdf/1803.08580.pdf,Weighted Bilinear Coding over Salient Body Parts for Person Re-identification,2018 +12,United States,Duke MTMC,duke_mtmc,39.95472495,-75.15346905,Temple University,edu,a0dfc588cd1bc35a06734a31fca81e7adc94b940,citation,https://arxiv.org/pdf/1803.08580.pdf,Weighted Bilinear Coding over Salient Body Parts for Person Re-identification,2018 +13,China,Duke MTMC,duke_mtmc,23.0502042,113.39880323,South China University of Technology,edu,a0dfc588cd1bc35a06734a31fca81e7adc94b940,citation,https://arxiv.org/pdf/1803.08580.pdf,Weighted Bilinear Coding over Salient Body Parts for Person Re-identification,2018 +14,China,Duke MTMC,duke_mtmc,40.00229045,116.32098908,Tsinghua University,edu,a0dfc588cd1bc35a06734a31fca81e7adc94b940,citation,https://arxiv.org/pdf/1803.08580.pdf,Weighted Bilinear Coding over Salient Body Parts for Person Re-identification,2018 +15,China,Duke MTMC,duke_mtmc,30.672721,104.098806,University of Electronic Science and Technology of China,edu,ed2ba6448db8cf945ca24d4df11916c2c5c3edd1,citation,,Rapid Pedestrian Detection Based on Deep Omega-Shape Features with 
Partial Occlusion Handing,2018 +16,China,Duke MTMC,duke_mtmc,30.19331415,120.11930822,Zhejiang University,edu,5b062562a8067baae045df1c7f5a8455d0363b5a,citation,https://arxiv.org/pdf/1810.06996.pdf,SCPNet: Spatial-Channel Parallelism Network for Joint Holistic and Partial Person Re-Identification,2018 +17,China,Duke MTMC,duke_mtmc,40.0044795,116.370238,Chinese Academy of Sciences,edu,5b062562a8067baae045df1c7f5a8455d0363b5a,citation,https://arxiv.org/pdf/1810.06996.pdf,SCPNet: Spatial-Channel Parallelism Network for Joint Holistic and Partial Person Re-Identification,2018 +18,China,Duke MTMC,duke_mtmc,38.88140235,121.52281098,Dalian University of Technology,edu,e8dac6b899e2be56b4d8b4b5bfb422eb1fe2cb68,citation,,A novel two-stream saliency image fusion CNN architecture for person re-identification,2017 +19,United States,Duke MTMC,duke_mtmc,29.58333105,-98.61944505,University of Texas at San Antonio,edu,e8dac6b899e2be56b4d8b4b5bfb422eb1fe2cb68,citation,,A novel two-stream saliency image fusion CNN architecture for person re-identification,2017 +20,China,Duke MTMC,duke_mtmc,31.83907195,117.26420748,University of Science and Technology of China,edu,d4a5c9b2197b6bc476aa296b8d59515c9684e97d,citation,,CA3Net: Contextual-Attentional Attribute-Appearance Network for Person Re-Identification,2018 +21,United States,Duke MTMC,duke_mtmc,40.1019523,-88.2271615,UIUC,edu,c2a5f27d97744bc1f96d7e1074395749e3c59bc8,citation,https://arxiv.org/pdf/1804.05275.pdf,Horizontal Pyramid Matching for Person Re-identification,2019 +22,United States,Duke MTMC,duke_mtmc,37.8718992,-122.2585399,UC Berkeley,edu,8ba606d7667c50054d74083867230abbed755574,citation,https://arxiv.org/pdf/1811.01268.pdf,"ReXCam: Resource-Efficient, Cross-Camera Video Analytics at Enterprise Scale",2018 +23,United States,Duke MTMC,duke_mtmc,41.78468745,-87.60074933,University of Chicago,edu,8ba606d7667c50054d74083867230abbed755574,citation,https://arxiv.org/pdf/1811.01268.pdf,"ReXCam: Resource-Efficient, Cross-Camera Video Analytics at Enterprise Scale",2018 +24,United States,Duke MTMC,duke_mtmc,47.6423318,-122.1369302,Microsoft,company,8ba606d7667c50054d74083867230abbed755574,citation,https://arxiv.org/pdf/1811.01268.pdf,"ReXCam: Resource-Efficient, Cross-Camera Video Analytics at Enterprise Scale",2018 +25,China,Duke MTMC,duke_mtmc,30.491766,114.396237,South-Central University for Nationalities,edu,cbf5b3469c7216c37733efca6c2cdb94357b14a7,citation,,Person Re-identification Based on Feature Fusion and Triplet Loss Function,2018 +26,China,Duke MTMC,duke_mtmc,30.60903415,114.3514284,Wuhan University of Technology,edu,cbf5b3469c7216c37733efca6c2cdb94357b14a7,citation,,Person Re-identification Based on Feature Fusion and Triplet Loss Function,2018 +27,China,Duke MTMC,duke_mtmc,32.0565957,118.77408833,Nanjing University,edu,3b24dcb3a1ff4811386b3467943c0ccad266bc99,citation,https://arxiv.org/pdf/1811.08561.pdf,Adaptive Re-ranking of Deep Feature for Person Re-identification,2018 +28,Australia,Duke MTMC,duke_mtmc,-37.8087465,144.9638875,RMIT University,edu,3b24dcb3a1ff4811386b3467943c0ccad266bc99,citation,https://arxiv.org/pdf/1811.08561.pdf,Adaptive Re-ranking of Deep Feature for Person Re-identification,2018 +29,China,Duke MTMC,duke_mtmc,22.3874201,114.2082222,Hong Kong Baptist University,edu,3cbf60c4a73fadd05b59c3abd19df032303e8577,citation,,Incremental Deep Hidden Attribute Learning,2018 +30,China,Duke MTMC,duke_mtmc,30.508964,114.410577,Huazhong University of Science of Technology,edu,3cbf60c4a73fadd05b59c3abd19df032303e8577,citation,,Incremental 
Deep Hidden Attribute Learning,2018 +31,Japan,Duke MTMC,duke_mtmc,35.6924853,139.7582533,"National Institute of Informatics, Japan",edu,3cbf60c4a73fadd05b59c3abd19df032303e8577,citation,,Incremental Deep Hidden Attribute Learning,2018 +32,Japan,Duke MTMC,duke_mtmc,35.6924853,139.7582533,"National Institute of Informatics, Japan, Tokyo, Japan",edu,3cbf60c4a73fadd05b59c3abd19df032303e8577,citation,,Incremental Deep Hidden Attribute Learning,2018 +33,South Korea,Duke MTMC,duke_mtmc,35.2265288,126.839987,Gwangju Institute of Science and Technology,edu,5317bd54ad696f40594d78c3464d86d8e39bd75b,citation,https://arxiv.org/pdf/1901.08787.pdf,Multiple Hypothesis Tracking Algorithm for Multi-Target Multi-Camera Tracking with Disjoint Views,2018 +34,China,Duke MTMC,duke_mtmc,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,0db41739f514c4c911c54a4c90ab5f07db3862dc,citation,https://pdfs.semanticscholar.org/0db4/1739f514c4c911c54a4c90ab5f07db3862dc.pdf,NCA-Net for Tracking Multiple Objects across Multiple Cameras,2018 +35,United Kingdom,Duke MTMC,duke_mtmc,51.4584837,-2.6097752,University of Bristol,edu,92939c68b2075d0446fee540bd174b6da26fea05,citation,https://arxiv.org/pdf/1806.04074.pdf,Semantically Selective Augmentation for Deep Compact Person Re-Identification,2018 +36,China,Duke MTMC,duke_mtmc,40.00229045,116.32098908,Tsinghua University,edu,69a7c8bca699ee4100fbe6a83b72459c132a6f10,citation,https://pdfs.semanticscholar.org/69a7/c8bca699ee4100fbe6a83b72459c132a6f10.pdf,Aware Person Re-identification across Multiple Resolutions,2018 +37,Thailand,Duke MTMC,duke_mtmc,13.74311795,100.53287901,Chulalongkorn University,edu,fcec633bbdeaab2d61fcc6d86f74383ccc3621f9,citation,,Robust video editing detection using Scalable Color and Color Layout Descriptors,2017 +38,China,Duke MTMC,duke_mtmc,30.672721,104.098806,University of Electronic Science and Technology of China,edu,a20f132a30e99541aa7ba6dddac86e6a393778e8,citation,https://arxiv.org/pdf/1809.08556.pdf,Self Attention Grid for Person Re-Identification,2018 +39,China,Duke MTMC,duke_mtmc,39.98177,116.330086,Chinese Academy of Sciences & University of Chinese Academy of Sciences,edu,56423685e039d82d3cc88f797fc2b73f2d93e200,citation,,A Unified Generative Adversarial Framework for Image Generation and Person Re-identification,2018 +40,China,Duke MTMC,duke_mtmc,39.9922379,116.30393816,Peking University,edu,56423685e039d82d3cc88f797fc2b73f2d93e200,citation,,A Unified Generative Adversarial Framework for Image Generation and Person Re-identification,2018 +41,Australia,Duke MTMC,duke_mtmc,-33.8809651,151.20107299,University of Technology Sydney,edu,f8f92624c8794d54e08b3a8f94910952ae03cade,citation,,CamStyle: A Novel Data Augmentation Method for Person Re-Identification,2019 +42,China,Duke MTMC,duke_mtmc,24.4399419,118.09301781,Xiamen University,edu,f8f92624c8794d54e08b3a8f94910952ae03cade,citation,,CamStyle: A Novel Data Augmentation Method for Person Re-Identification,2019 +43,Australia,Duke MTMC,duke_mtmc,-35.2776999,149.118527,Australian National University,edu,f8f92624c8794d54e08b3a8f94910952ae03cade,citation,,CamStyle: A Novel Data Augmentation Method for Person Re-Identification,2019 +44,China,Duke MTMC,duke_mtmc,22.4162632,114.2109318,Chinese University of Hong Kong,edu,08d2a558ea2deb117dd8066e864612bf2899905b,citation,https://arxiv.org/pdf/1807.09975.pdf,Person Re-identification with Deep Similarity-Guided Graph Neural Network,2018 +45,China,Duke 
MTMC,duke_mtmc,39.993008,116.329882,SenseTime,company,08d2a558ea2deb117dd8066e864612bf2899905b,citation,https://arxiv.org/pdf/1807.09975.pdf,Person Re-identification with Deep Similarity-Guided Graph Neural Network,2018 +46,United States,Duke MTMC,duke_mtmc,37.8718992,-122.2585399,University of California,edu,fefa8f07d998f8f4a6c85a7da781b19bf6b78d7d,citation,https://arxiv.org/pdf/1902.00749.pdf,Online Multi-Object Tracking with Dual Matching Attention Networks,2018 +47,China,Duke MTMC,duke_mtmc,39.9808333,116.34101249,Beihang University,edu,7bfc5bbad852f9e6bea3b86c25179d81e2e7fff6,citation,,Online Inter-Camera Trajectory Association Exploiting Person Re-Identification and Camera Topology,2018 +48,China,Duke MTMC,duke_mtmc,40.00229045,116.32098908,Tsinghua University,edu,be79ad118d0524d9b493f4a14a662c8184e6405a,citation,,Attend and Align: Improving Deep Representations with Feature Alignment Layer for Person Retrieval,2018 +49,China,Duke MTMC,duke_mtmc,40.00229045,116.32098908,Tsinghua University,edu,13ea9a2ed134a9e238d33024fba34d3dd6a010e0,citation,https://arxiv.org/pdf/1703.05693.pdf,SVDNet for Pedestrian Retrieval,2017 +50,Australia,Duke MTMC,duke_mtmc,-33.8809651,151.20107299,University of Technology Sydney,edu,13ea9a2ed134a9e238d33024fba34d3dd6a010e0,citation,https://arxiv.org/pdf/1703.05693.pdf,SVDNet for Pedestrian Retrieval,2017 +51,China,Duke MTMC,duke_mtmc,30.19331415,120.11930822,Zhejiang University,edu,608dede56161fd5f76bcf9228b4dd8c639d65b02,citation,https://arxiv.org/pdf/1807.00537.pdf,SphereReID: Deep Hypersphere Manifold Embedding for Person Re-Identification,2018 +52,United States,Duke MTMC,duke_mtmc,42.7298459,-73.67950216,Rensselaer Polytechnic Institute,edu,24d6d3adf2176516ef0de2e943ce2084e27c4f94,citation,https://arxiv.org/pdf/1811.07487.pdf,Re-Identification with Consistent Attentive Siamese Networks,2018 +53,Australia,Duke MTMC,duke_mtmc,-33.8809651,151.20107299,University of Technology Sydney,edu,15e1af79939dbf90790b03d8aa02477783fb1d0f,citation,https://arxiv.org/pdf/1701.07717.pdf,Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro,2017 +54,China,Duke MTMC,duke_mtmc,30.778621,103.961236,XiHua University,edu,ec9c20ed6cce15e9b63ac96bb5a6d55e69661e0b,citation,https://pdfs.semanticscholar.org/ec9c/20ed6cce15e9b63ac96bb5a6d55e69661e0b.pdf,Robust Pedestrian Detection for Semi-automatic Construction of a Crowded Person Re-Identification Dataset,2018 +55,United Kingdom,Duke MTMC,duke_mtmc,51.24303255,-0.59001382,University of Surrey,edu,ec9c20ed6cce15e9b63ac96bb5a6d55e69661e0b,citation,https://pdfs.semanticscholar.org/ec9c/20ed6cce15e9b63ac96bb5a6d55e69661e0b.pdf,Robust Pedestrian Detection for Semi-automatic Construction of a Crowded Person Re-Identification Dataset,2018 +56,China,Duke MTMC,duke_mtmc,31.4854255,120.2739581,Jiangnan University,edu,ec9c20ed6cce15e9b63ac96bb5a6d55e69661e0b,citation,https://pdfs.semanticscholar.org/ec9c/20ed6cce15e9b63ac96bb5a6d55e69661e0b.pdf,Robust Pedestrian Detection for Semi-automatic Construction of a Crowded Person Re-Identification Dataset,2018 +57,United Kingdom,Duke MTMC,duke_mtmc,51.5247272,-0.03931035,Queen Mary University of London,edu,fa3fb32fe0cd392960549b0adb7a535eb3656abd,citation,https://arxiv.org/pdf/1711.08106.pdf,The Devil is in the Middle: Exploiting Mid-level Representations for Cross-Domain Instance Matching,2017 +58,United Kingdom,Duke MTMC,duke_mtmc,55.94951105,-3.19534913,University of 
Edinburgh,edu,fa3fb32fe0cd392960549b0adb7a535eb3656abd,citation,https://arxiv.org/pdf/1711.08106.pdf,The Devil is in the Middle: Exploiting Mid-level Representations for Cross-Domain Instance Matching,2017 +59,United States,Duke MTMC,duke_mtmc,40.1019523,-88.2271615,UIUC,edu,54c28bf64debbdb21c246795182f97d4f7917b74,citation,https://arxiv.org/pdf/1811.04129.pdf,STA: Spatial-Temporal Attention for Large-Scale Video-based Person Re-Identification,2018 +60,United States,Duke MTMC,duke_mtmc,34.0803829,-118.3909947,Tencent,company,3b311a1ce30f9c0f3dc1d9c0cf25f13127a5e48c,citation,https://arxiv.org/pdf/1810.12193.pdf,A Coarse-to-fine Pyramidal Model for Person Re-identification via Multi-Loss Dynamic Training,2018 +61,United States,Duke MTMC,duke_mtmc,37.3860784,-121.9877807,Google and Hewlett-Packard Labs,edu,4d799f6e09f442bde583a50a0a9f81131ef707bb,citation,,TAR: Enabling Fine-Grained Targeted Advertising in Retail Stores,2018 +62,United States,Duke MTMC,duke_mtmc,37.3860784,-121.9877807,Hewlett-Packard Labs,edu,4d799f6e09f442bde583a50a0a9f81131ef707bb,citation,,TAR: Enabling Fine-Grained Targeted Advertising in Retail Stores,2018 +63,United States,Duke MTMC,duke_mtmc,39.6321923,-76.3038146,LinkedIn and Hewlett-Packard Labs,edu,4d799f6e09f442bde583a50a0a9f81131ef707bb,citation,,TAR: Enabling Fine-Grained Targeted Advertising in Retail Stores,2018 +64,United States,Duke MTMC,duke_mtmc,34.0224149,-118.28634407,University of Southern California,edu,4d799f6e09f442bde583a50a0a9f81131ef707bb,citation,,TAR: Enabling Fine-Grained Targeted Advertising in Retail Stores,2018 +65,Canada,Duke MTMC,duke_mtmc,49.2767454,-122.91777375,Simon Fraser University,edu,5137ca9f0a7cf4c61f2254d4a252a0c56e5dcfcc,citation,https://arxiv.org/pdf/1811.07130.pdf,Batch Feature Erasing for Person Re-identification and Beyond,2018 +66,China,Duke MTMC,duke_mtmc,32.0565957,118.77408833,Nanjing University,edu,c37c3853ab428725f13906bb0ff4936ffe15d6af,citation,https://arxiv.org/pdf/1809.02874.pdf,Unsupervised Person Re-identification by Deep Learning Tracklet Association,2018 +67,United Kingdom,Duke MTMC,duke_mtmc,51.5247272,-0.03931035,Queen Mary University of London,edu,c37c3853ab428725f13906bb0ff4936ffe15d6af,citation,https://arxiv.org/pdf/1809.02874.pdf,Unsupervised Person Re-identification by Deep Learning Tracklet Association,2018 +68,United States,Duke MTMC,duke_mtmc,37.8687126,-122.25586815,"University of California, Berkeley",edu,a8d665fa7357f696dcfd188b91fda88da47b964e,citation,https://arxiv.org/pdf/1809.02318.pdf,Scaling Video Analytics Systems to Large Camera Deployments,2018 +69,United States,Duke MTMC,duke_mtmc,47.6423318,-122.1369302,Microsoft,company,a8d665fa7357f696dcfd188b91fda88da47b964e,citation,https://arxiv.org/pdf/1809.02318.pdf,Scaling Video Analytics Systems to Large Camera Deployments,2018 +70,United States,Duke MTMC,duke_mtmc,41.78468745,-87.60074933,University of Chicago,edu,a8d665fa7357f696dcfd188b91fda88da47b964e,citation,https://arxiv.org/pdf/1809.02318.pdf,Scaling Video Analytics Systems to Large Camera Deployments,2018 +71,China,Duke MTMC,duke_mtmc,23.09461185,113.28788994,Sun Yat-Sen University,edu,dda0b381c162695f21b8d1149aab22188b3c2bc0,citation,https://arxiv.org/pdf/1804.02792.pdf,Occluded Person Re-Identification,2018 +72,China,Duke MTMC,duke_mtmc,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,33f358f1d2b54042c524d69b20e80d98dde3dacd,citation,https://arxiv.org/pdf/1811.11405.pdf,Spectral Feature Transformation for Person Re-identification,2018 +73,United States,Duke 
MTMC,duke_mtmc,32.8734455,-117.2065636,TuSimple,edu,33f358f1d2b54042c524d69b20e80d98dde3dacd,citation,https://arxiv.org/pdf/1811.11405.pdf,Spectral Feature Transformation for Person Re-identification,2018 +74,China,Duke MTMC,duke_mtmc,30.672721,104.098806,University of Electronic Science and Technology of China,edu,8ffc49aead99fdacb0b180468a36984759f2fc1e,citation,https://arxiv.org/pdf/1809.04976.pdf,Sparse Label Smoothing for Semi-supervised Person Re-Identification,2018 +75,Germany,Duke MTMC,duke_mtmc,50.7791703,6.06728733,RWTH Aachen University,edu,10b36c003542545f1e2d73e8897e022c0c260c32,citation,https://arxiv.org/pdf/1705.04608.pdf,Towards a Principled Integration of Multi-camera Re-identification and Tracking Through Optimal Bayes Filters,2017 +76,United Kingdom,Duke MTMC,duke_mtmc,51.7534538,-1.25400997,University of Oxford,edu,94ed6dc44842368b457851b43023c23fd78d5390,citation,https://arxiv.org/pdf/1806.01794.pdf,"Sequential Attend, Infer, Repeat: Generative Modelling of Moving Objects",2018 +77,China,Duke MTMC,duke_mtmc,39.9041999,116.4073963,"Beijing, China",edu,280976bbb41d2948a5c0208f86605977397181cd,citation,https://arxiv.org/pdf/1811.08073.pdf,Factorized Distillation: Training Holistic Person Re-identification Model by Distilling an Ensemble of Partial ReID Models,2018 +78,China,Duke MTMC,duke_mtmc,40.00229045,116.32098908,Tsinghua University,edu,280976bbb41d2948a5c0208f86605977397181cd,citation,https://arxiv.org/pdf/1811.08073.pdf,Factorized Distillation: Training Holistic Person Re-identification Model by Distilling an Ensemble of Partial ReID Models,2018 +79,China,Duke MTMC,duke_mtmc,39.9922379,116.30393816,Peking University,edu,014e249422b6bd6ff32b3f7d385b5a0e8c4c9fcf,citation,https://arxiv.org/pdf/1810.05866.pdf,Attention driven person re-identification,2019 +80,Singapore,Duke MTMC,duke_mtmc,1.3484104,103.68297965,Nanyang Technological University,edu,014e249422b6bd6ff32b3f7d385b5a0e8c4c9fcf,citation,https://arxiv.org/pdf/1810.05866.pdf,Attention driven person re-identification,2019 +81,China,Duke MTMC,duke_mtmc,39.9808333,116.34101249,Beihang University,edu,e9d549989926f36abfa5dc7348ae3d79a567bf30,citation,,Orientation-Guided Similarity Learning for Person Re-identification,2018 +82,China,Duke MTMC,duke_mtmc,23.09461185,113.28788994,Sun Yat-Sen University,edu,95bdd45fed0392418e0e5d3e51d34714917e3c87,citation,https://arxiv.org/pdf/1812.03282.pdf,Spatial-Temporal Person Re-identification,2019 +83,China,Duke MTMC,duke_mtmc,31.30104395,121.50045497,Fudan University,edu,00e3957212517a252258baef833833921dd308d4,citation,,Adaptively Weighted Multi-task Deep Network for Person Attribute Classification,2017 +84,United Kingdom,Duke MTMC,duke_mtmc,51.5247272,-0.03931035,Queen Mary University of London,edu,705073015bb8ae97212532a30488c05d50894bec,citation,https://arxiv.org/pdf/1803.09786.pdf,Transferable Joint Attribute-Identity Deep Learning for Unsupervised Person Re-identification,2018 +85,United States,Duke MTMC,duke_mtmc,35.9990522,-78.9290629,Duke University,edu,9e644b1e33dd9367be167eb9d832174004840400,citation,https://users.cs.duke.edu/~tomasi/papers/ristani/ristaniTCAS16.pdf,Tracking Social Groups Within and Across Cameras,2017 +86,Italy,Duke MTMC,duke_mtmc,44.6451046,10.9279268,University of Modena,edu,9e644b1e33dd9367be167eb9d832174004840400,citation,https://users.cs.duke.edu/~tomasi/papers/ristani/ristaniTCAS16.pdf,Tracking Social Groups Within and Across Cameras,2017 +87,United States,Duke MTMC,duke_mtmc,35.9990522,-78.9290629,Duke 
University,edu,27a2fad58dd8727e280f97036e0d2bc55ef5424c,citation,https://arxiv.org/pdf/1609.01775.pdf,"Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking",2016 +88,Switzerland,Duke MTMC,duke_mtmc,46.5190557,6.5667576,EPFL,edu,4e4e3ddb55607e127a4abdef45d92adf1ff78de2,citation,http://openaccess.thecvf.com/content_ICCV_2017/papers/Maksai_Non-Markovian_Globally_Consistent_ICCV_2017_paper.pdf,Non-Markovian Globally Consistent Multi-object Tracking,2017 +89,Switzerland,Duke MTMC,duke_mtmc,46.109237,7.08453549,IDIAP Research Institute,edu,4e4e3ddb55607e127a4abdef45d92adf1ff78de2,citation,http://openaccess.thecvf.com/content_ICCV_2017/papers/Maksai_Non-Markovian_Globally_Consistent_ICCV_2017_paper.pdf,Non-Markovian Globally Consistent Multi-object Tracking,2017 +90,United States,Duke MTMC,duke_mtmc,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,4e4e3ddb55607e127a4abdef45d92adf1ff78de2,citation,http://openaccess.thecvf.com/content_ICCV_2017/papers/Maksai_Non-Markovian_Globally_Consistent_ICCV_2017_paper.pdf,Non-Markovian Globally Consistent Multi-object Tracking,2017 +91,United Kingdom,Duke MTMC,duke_mtmc,51.5247272,-0.03931035,Queen Mary University of London,edu,fc26fc2340a863d6da0b427cd924fb4cb101051b,citation,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w37/Chen_Person_Re-Identification_by_ICCV_2017_paper.pdf,Person Re-identification by Deep Learning Multi-scale Representations,2017 +92,United Kingdom,Duke MTMC,duke_mtmc,55.378051,-3.435973,"Vision Semantics Ltd, UK",edu,fc26fc2340a863d6da0b427cd924fb4cb101051b,citation,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w37/Chen_Person_Re-Identification_by_ICCV_2017_paper.pdf,Person Re-identification by Deep Learning Multi-scale Representations,2017 +93,Canada,Duke MTMC,duke_mtmc,43.4983503,-80.5478382,"Senstar Corporation, Waterloo, Canada",company,8e42568c2b3feaafd1e442e1e861ec50a4ac144f,citation,https://arxiv.org/pdf/1805.06086.pdf,An Evaluation of Deep CNN Baselines for Scene-Independent Person Re-identification,2018 +94,Italy,Duke MTMC,duke_mtmc,45.4377672,12.321807,University Iuav of Venice,edu,eddb1a126eafecad2cead01c6c3bb4b88120d78a,citation,https://arxiv.org/pdf/1802.02181.pdf,Applications of a Graph Theoretic Based Clustering Framework in Computer Vision and Pattern Recognition,2018 +95,China,Duke MTMC,duke_mtmc,40.00229045,116.32098908,Tsinghua University,edu,fc068f7f8a3b2921ec4f3246e9b6c6015165df9a,citation,https://arxiv.org/pdf/1711.09349.pdf,Beyond Part Models: Person Retrieval with Refined Part Pooling (and A Strong Convolutional Baseline),2018 +96,Australia,Duke MTMC,duke_mtmc,-33.8809651,151.20107299,University of Technology Sydney,edu,fc068f7f8a3b2921ec4f3246e9b6c6015165df9a,citation,https://arxiv.org/pdf/1711.09349.pdf,Beyond Part Models: Person Retrieval with Refined Part Pooling (and A Strong Convolutional Baseline),2018 +97,United States,Duke MTMC,duke_mtmc,29.58333105,-98.61944505,University of Texas at San Antonio,edu,fc068f7f8a3b2921ec4f3246e9b6c6015165df9a,citation,https://arxiv.org/pdf/1711.09349.pdf,Beyond Part Models: Person Retrieval with Refined Part Pooling (and A Strong Convolutional Baseline),2018 +98,United States,Duke MTMC,duke_mtmc,43.0008093,-78.7889697,University at Buffalo,edu,fdd1bde7066c7e9c7515f330546e0b3a8de8a4a6,citation,https://arxiv.org/pdf/1811.06582.pdf,CAN: Composite Appearance Network and a Novel Evaluation Metric for Person Tracking,2018 +99,United States,Duke MTMC,duke_mtmc,43.0008093,-78.7889697,University at 
Buffalo,edu,3144c9b3bedb6e3895dcd36998bcb0903271841d,citation,https://arxiv.org/pdf/1811.06582.pdf,CAN: Composite Appearance Network and a Novel Evaluation Metric for Person Tracking,2018 +100,China,Duke MTMC,duke_mtmc,29.1416432,119.7889248,"Alibaba Group, Zhejiang, People’s Republic of China",edu,f4e65ab81a0f4ffa50d0c9bc308d7365e012cc75,citation,https://arxiv.org/pdf/1812.05785.pdf,Deep Active Learning for Video-based Person Re-identification,2018 +101,China,Duke MTMC,duke_mtmc,30.19331415,120.11930822,Zhejiang University,edu,f4e65ab81a0f4ffa50d0c9bc308d7365e012cc75,citation,https://arxiv.org/pdf/1812.05785.pdf,Deep Active Learning for Video-based Person Re-identification,2018 +102,China,Duke MTMC,duke_mtmc,38.88140235,121.52281098,Dalian University of Technology,edu,5be74c6fa7f890ea530e427685dadf0d0a371fc1,citation,https://arxiv.org/pdf/1804.11027.pdf,Deep Co-attention based Comparators For Relative Representation Learning in Person Re-identification,2018 +103,Australia,Duke MTMC,duke_mtmc,-27.49741805,153.01316956,University of Queensland,edu,5be74c6fa7f890ea530e427685dadf0d0a371fc1,citation,https://arxiv.org/pdf/1804.11027.pdf,Deep Co-attention based Comparators For Relative Representation Learning in Person Re-identification,2018 +104,Australia,Duke MTMC,duke_mtmc,-33.88890695,151.18943366,University of Sydney,edu,5be74c6fa7f890ea530e427685dadf0d0a371fc1,citation,https://arxiv.org/pdf/1804.11027.pdf,Deep Co-attention based Comparators For Relative Representation Learning in Person Re-identification,2018 +105,Switzerland,Duke MTMC,duke_mtmc,46.5184121,6.5684654,École Polytechnique Fédérale de Lausanne,edu,0f3eb3719b6f6f544b766e0bfeb8f962c9bd59f4,citation,https://arxiv.org/pdf/1811.10984.pdf,Eliminating Exposure Bias and Loss-Evaluation Mismatch in Multiple Object Tracking,2018 +106,Italy,Duke MTMC,duke_mtmc,45.434532,12.326197,"DAIS, Università Ca’ Foscari, Venice, Italy",edu,6dce5866ebc46355a35b8667c1e04a4790c2289b,citation,https://pdfs.semanticscholar.org/6dce/5866ebc46355a35b8667c1e04a4790c2289b.pdf,Extensions of dominant sets and their applications in computer vision,2018 +107,United States,Duke MTMC,duke_mtmc,42.3383668,-71.08793524,Northeastern University,edu,8abe89ab85250fd7a8117da32bc339a71c67dc21,citation,https://arxiv.org/pdf/1709.07065.pdf,Multi-camera Multi-Object Tracking,2017 +108,Australia,Duke MTMC,duke_mtmc,-33.8809651,151.20107299,University of Technology Sydney,edu,b856c0eb039effce7da9ff45c3f5987f18928bef,citation,https://arxiv.org/pdf/1707.00408.pdf,Pedestrian Alignment Network for Large-scale Person Re-identification,2017 +109,Germany,Duke MTMC,duke_mtmc,49.10184375,8.4331256,Karlsruhe Institute of Technology,edu,bab66082d01b393e6b9e841e5e06782a6c61ec88,citation,https://arxiv.org/pdf/1803.08709.pdf,Pose-Driven Deep Models for Person Re-Identification,2018 +110,China,Duke MTMC,duke_mtmc,31.30104395,121.50045497,Fudan University,edu,e6d8f332ae26e9983d5b42af4466ff95b55f2341,citation,https://arxiv.org/pdf/1712.02225.pdf,Pose-Normalized Image Generation for Person Re-identification,2018 +111,Japan,Duke MTMC,duke_mtmc,34.7321121,135.7328585,Nara Institute of Science and Technology,edu,e6d8f332ae26e9983d5b42af4466ff95b55f2341,citation,https://arxiv.org/pdf/1712.02225.pdf,Pose-Normalized Image Generation for Person Re-identification,2018 +112,United Kingdom,Duke MTMC,duke_mtmc,51.5247272,-0.03931035,Queen Mary University of London,edu,e6d8f332ae26e9983d5b42af4466ff95b55f2341,citation,https://arxiv.org/pdf/1712.02225.pdf,Pose-Normalized Image Generation for Person 
Re-identification,2018 +113,China,Duke MTMC,duke_mtmc,22.8376,108.289839,Guangxi University,edu,4a91be40e6b382c3ddf3385ac44062b2399336a8,citation,https://arxiv.org/pdf/1809.09970.pdf,Random Occlusion-recovery for Person Re-identification,2018 +114,China,Duke MTMC,duke_mtmc,31.28473925,121.49694909,Tongji University,edu,4a91be40e6b382c3ddf3385ac44062b2399336a8,citation,https://arxiv.org/pdf/1809.09970.pdf,Random Occlusion-recovery for Person Re-identification,2018 +115,France,Duke MTMC,duke_mtmc,45.2173989,5.7921349,"Naver Labs Europe, Meylan, France",edu,4d8347a69e77cc02c1e1aba3a8b6646eac1a0b3d,citation,https://arxiv.org/pdf/1801.05339.pdf,Re-ID done right: towards good practices for person re-identification.,2018 +116,United States,Duke MTMC,duke_mtmc,28.59899755,-81.19712501,University of Central Florida,edu,a1e97c4043d5cc9896dc60ae7ca135782d89e5fc,citation,https://arxiv.org/pdf/1612.02155.pdf,"Re-identification of Humans in Crowds using Personal, Social and Environmental Constraints",2016 +117,China,Duke MTMC,duke_mtmc,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,0e36bf238d2db6c970ade0b5f68811ed6debc4e8,citation,https://arxiv.org/pdf/1810.07399.pdf,Recognizing Partial Biometric Patterns,2018 +118,United States,Duke MTMC,duke_mtmc,42.4505507,-76.4783513,Cornell University,edu,6d76eefecdcaa130a000d1d6c93cf57166ebd18e,citation,https://arxiv.org/pdf/1805.08805.pdf,Resource Aware Person Re-identification Across Multiple Resolutions,2018 +119,China,Duke MTMC,duke_mtmc,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,6d76eefecdcaa130a000d1d6c93cf57166ebd18e,citation,https://arxiv.org/pdf/1805.08805.pdf,Resource Aware Person Re-identification Across Multiple Resolutions,2018 +120,China,Duke MTMC,duke_mtmc,40.00229045,116.32098908,Tsinghua University,edu,6d76eefecdcaa130a000d1d6c93cf57166ebd18e,citation,https://arxiv.org/pdf/1805.08805.pdf,Resource Aware Person Re-identification Across Multiple Resolutions,2018 +121,China,Duke MTMC,duke_mtmc,31.846918,117.29053367,Hefei University of Technology,edu,42dc432f58adfaa7bf6af07e5faf9e75fea29122,citation,https://arxiv.org/pdf/1811.08115.pdf,Sequence-based Person Attribute Recognition with Joint CTC-Attention Model,2018 +122,China,Duke MTMC,duke_mtmc,31.1675446,121.3974873,"Tencent, Shanghai, China",company,42dc432f58adfaa7bf6af07e5faf9e75fea29122,citation,https://arxiv.org/pdf/1811.08115.pdf,Sequence-based Person Attribute Recognition with Joint CTC-Attention Model,2018 +123,United States,Duke MTMC,duke_mtmc,47.6423318,-122.1369302,Microsoft,company,8a77025bde5479a1366bb93c6f2366b5a6293720,citation,https://arxiv.org/pdf/1805.02336.pdf,Sharp Attention Network via Adaptive Sampling for Person Re-identification,2018 +124,United States,Duke MTMC,duke_mtmc,40.11116745,-88.22587665,"University of Illinois, Urbana-Champaign",edu,8a77025bde5479a1366bb93c6f2366b5a6293720,citation,https://arxiv.org/pdf/1805.02336.pdf,Sharp Attention Network via Adaptive Sampling for Person Re-identification,2018 +125,China,Duke MTMC,duke_mtmc,30.19331415,120.11930822,Zhejiang University,edu,8a77025bde5479a1366bb93c6f2366b5a6293720,citation,https://arxiv.org/pdf/1805.02336.pdf,Sharp Attention Network via Adaptive Sampling for Person Re-identification,2018 +126,Australia,Duke MTMC,duke_mtmc,-35.2776999,149.118527,Australian National University,edu,304196021200067a838c06002d9e96d6a12a1e46,citation,https://arxiv.org/pdf/1811.10551.pdf,Similarity-preserving Image-image Domain Adaptation for Person Re-identification,2018 +127,Australia,Duke 
MTMC,duke_mtmc,-33.8809651,151.20107299,University of Technology Sydney,edu,304196021200067a838c06002d9e96d6a12a1e46,citation,https://arxiv.org/pdf/1811.10551.pdf,Similarity-preserving Image-image Domain Adaptation for Person Re-identification,2018 +128,China,Duke MTMC,duke_mtmc,28.2290209,112.99483204,"National University of Defense Technology, China",mil,e90816e1a0e14ea1e7039e0b2782260999aef786,citation,https://arxiv.org/pdf/1809.03137.pdf,Tracking by Animation: Unsupervised Learning of Multi-Object Attentive Trackers,2018 +129,United Kingdom,Duke MTMC,duke_mtmc,51.5231607,-0.1282037,University College London,edu,e90816e1a0e14ea1e7039e0b2782260999aef786,citation,https://arxiv.org/pdf/1809.03137.pdf,Tracking by Animation: Unsupervised Learning of Multi-Object Attentive Trackers,2018 +130,United States,Duke MTMC,duke_mtmc,37.2283843,-80.4234167,Virginia Tech,edu,e278218ba1ff1b85d06680e99b08e817d0962dab,citation,https://arxiv.org/pdf/1710.02139.pdf,Tracking Persons-of-Interest via Unsupervised Representation Adaptation,2017 +131,China,Duke MTMC,duke_mtmc,34.250803,108.983693,Xi’an Jiaotong University,edu,e278218ba1ff1b85d06680e99b08e817d0962dab,citation,https://arxiv.org/pdf/1710.02139.pdf,Tracking Persons-of-Interest via Unsupervised Representation Adaptation,2017 +132,China,Duke MTMC,duke_mtmc,30.508964,114.410577,"Huazhong Univ. of Science and Technology, China",edu,42656cf2b75dccc7f8f224f7a86c2ea4de1ae671,citation,https://arxiv.org/pdf/1807.11334.pdf,Unsupervised Domain Adaptive Re-Identification: Theory and Practice,2018 +133,China,Duke MTMC,duke_mtmc,23.09461185,113.28788994,Sun Yat-Sen University,edu,788ab52d4f7fedb4b79347bb81822c4f3c430d80,citation,https://arxiv.org/pdf/1901.10177.pdf,Unsupervised Person Re-identification by Deep Asymmetric Metric Embedding,2018 +134,Australia,Duke MTMC,duke_mtmc,-33.8809651,151.20107299,University of Technology Sydney,edu,31da1da2d4e7254dd8f2a4578d887c57e0678438,citation,https://arxiv.org/pdf/1705.10444.pdf,Unsupervised Person Re-identification: Clustering and Fine-tuning,2018 +135,United Kingdom,Duke MTMC,duke_mtmc,54.6141723,-5.9002151,Queen's University Belfast,edu,1e146982a7b088e7a3790d2683484944c3b9dcf7,citation,https://pdfs.semanticscholar.org/1e14/6982a7b088e7a3790d2683484944c3b9dcf7.pdf,Video Person Re-Identification for Wide Area Tracking based on Recurrent Neural Networks,2017 +136,Germany,Duke MTMC,duke_mtmc,49.01546,8.4257999,Fraunhofer,company,978716708762dab46e91059e170d43551be74732,citation,,A Pose-Sensitive Embedding for Person Re-identification with Expanded Cross Neighborhood Re-ranking,2018 +137,Germany,Duke MTMC,duke_mtmc,49.10184375,8.4331256,Karlsruhe Institute of Technology,edu,978716708762dab46e91059e170d43551be74732,citation,,A Pose-Sensitive Embedding for Person Re-identification with Expanded Cross Neighborhood Re-ranking,2018 +138,Taiwan,Duke MTMC,duke_mtmc,25.01682835,121.53846924,National Taiwan University,edu,d9216cc2a3c03659cb2392b7cc8509feb7829579,citation,,Adaptation and Re-identification Network: An Unsupervised Deep Transfer Learning Approach to Person Re-identification,2018 +139,China,Duke MTMC,duke_mtmc,39.979203,116.33287,"CRIPAC & NLPR, CASIA",edu,1bfe59be5b42d6b7257da4b35a408239c01ab79d,citation,,Adversarially Occluded Samples for Person Re-identification,2018 +140,China,Duke MTMC,duke_mtmc,40.0044795,116.370238,Chinese Academy of Sciences,edu,1bfe59be5b42d6b7257da4b35a408239c01ab79d,citation,,Adversarially Occluded Samples for Person Re-identification,2018 +141,China,Duke 
MTMC,duke_mtmc,22.543096,114.057865,"SenseNets Corporation, Shenzhen, China",company,14ce502bc19b225466126b256511f9c05cadcb6e,citation,,Attention-Aware Compositional Network for Person Re-identification,2018 +142,China,Duke MTMC,duke_mtmc,39.993008,116.329882,SenseTime,company,14ce502bc19b225466126b256511f9c05cadcb6e,citation,,Attention-Aware Compositional Network for Person Re-identification,2018 +143,Australia,Duke MTMC,duke_mtmc,-33.88890695,151.18943366,University of Sydney,edu,14ce502bc19b225466126b256511f9c05cadcb6e,citation,,Attention-Aware Compositional Network for Person Re-identification,2018 +144,Australia,Duke MTMC,duke_mtmc,-33.8809651,151.20107299,University of Technology Sydney,edu,1822ca8db58b0382b0c64f310840f0f875ea02c0,citation,,Camera Style Adaptation for Person Re-identification,2018 +145,China,Duke MTMC,duke_mtmc,24.4399419,118.09301781,Xiamen University,edu,1822ca8db58b0382b0c64f310840f0f875ea02c0,citation,,Camera Style Adaptation for Person Re-identification,2018 +146,China,Duke MTMC,duke_mtmc,36.16161795,120.49355276,Ocean University of China,edu,38259235a1c7b2c68ca09f3bc0930987ae99cf00,citation,,Deep Feature Ranking for Person Re-Identification,2019 +147,South Korea,Duke MTMC,duke_mtmc,35.84658875,127.1350133,Chonbuk National University,edu,c635564fe2f7d91b578bd6959904982aaa61234d,citation,,Deep Multi-Task Network for Learning Person Identity and Attributes,2018 +148,China,Duke MTMC,duke_mtmc,22.4162632,114.2109318,Chinese University of Hong Kong,edu,947954cafdefd471b75da8c3bb4c21b9e6d57838,citation,,End-to-End Deep Kronecker-Product Matching for Person Re-identification,2018 +149,China,Duke MTMC,duke_mtmc,39.993008,116.329882,SenseTime,company,947954cafdefd471b75da8c3bb4c21b9e6d57838,citation,,End-to-End Deep Kronecker-Product Matching for Person Re-identification,2018 +150,China,Duke MTMC,duke_mtmc,23.0502042,113.39880323,South China University of Technology,edu,cb68c60ac046a0ec1c7f67487f14b999037313e1,citation,,Exploit the Unknown Gradually: One-Shot Video-Based Person Re-identification by Stepwise Learning,2018 +151,Australia,Duke MTMC,duke_mtmc,-33.88890695,151.18943366,University of Sydney,edu,cb68c60ac046a0ec1c7f67487f14b999037313e1,citation,,Exploit the Unknown Gradually: One-Shot Video-Based Person Re-identification by Stepwise Learning,2018 +152,Australia,Duke MTMC,duke_mtmc,-33.8809651,151.20107299,University of Technology Sydney,edu,cb68c60ac046a0ec1c7f67487f14b999037313e1,citation,,Exploit the Unknown Gradually: One-Shot Video-Based Person Re-identification by Stepwise Learning,2018 +153,United States,Duke MTMC,duke_mtmc,35.9990522,-78.9290629,Duke University,edu,c0f01b8174a632448c20eb5472cd9d5b2c595e39,citation,,Features for Multi-target Multi-camera Tracking and Re-identification,2018 +154,China,Duke MTMC,duke_mtmc,22.4162632,114.2109318,Chinese University of Hong Kong,edu,308a13fd1d2847d98930a8e5542f773a9651a0ae,citation,,Group Consistent Similarity Learning via Deep CRF for Person Re-identification,2018 +155,Italy,Duke MTMC,duke_mtmc,46.0658836,11.1159894,University of Trento,edu,308a13fd1d2847d98930a8e5542f773a9651a0ae,citation,,Group Consistent Similarity Learning via Deep CRF for Person Re-identification,2018 +156,China,Duke MTMC,duke_mtmc,34.250803,108.983693,Xi’an Jiaotong University,edu,308a13fd1d2847d98930a8e5542f773a9651a0ae,citation,,Group Consistent Similarity Learning via Deep CRF for Person Re-identification,2018 +157,Turkey,Duke MTMC,duke_mtmc,41.10427915,29.02231159,Istanbul Technical 
University,edu,7ba225a614d77efd9bdf66bf74c80dd2da09229a,citation,,Human Semantic Parsing for Person Re-identification,2018 +158,United States,Duke MTMC,duke_mtmc,28.59899755,-81.19712501,University of Central Florida,edu,7ba225a614d77efd9bdf66bf74c80dd2da09229a,citation,,Human Semantic Parsing for Person Re-identification,2018 +159,Australia,Duke MTMC,duke_mtmc,-32.00686365,115.89691775,Curtin University,edu,292286c0024d6625fe606fb5b8a0df54ea3ffe91,citation,,Identity Adaptation for Person Re-Identification,2018 +160,United Kingdom,Duke MTMC,duke_mtmc,54.00975365,-2.78757491,Lancaster University,edu,292286c0024d6625fe606fb5b8a0df54ea3ffe91,citation,,Identity Adaptation for Person Re-Identification,2018 +161,Australia,Duke MTMC,duke_mtmc,-31.95040445,115.79790037,University of Western Australia,edu,292286c0024d6625fe606fb5b8a0df54ea3ffe91,citation,,Identity Adaptation for Person Re-Identification,2018 +162,China,Duke MTMC,duke_mtmc,40.0044795,116.370238,Chinese Academy of Sciences,edu,6cde93a5288e84671a7bee98cf6c94037f42da42,citation,,Image-Image Domain Adaptation with Preserved Self-Similarity and Domain-Dissimilarity for Person Re-identification,2018 +163,Singapore,Duke MTMC,duke_mtmc,1.340216,103.965089,Singapore University of Technology and Design,edu,6cde93a5288e84671a7bee98cf6c94037f42da42,citation,,Image-Image Domain Adaptation with Preserved Self-Similarity and Domain-Dissimilarity for Person Re-identification,2018 +164,Australia,Duke MTMC,duke_mtmc,-33.8809651,151.20107299,University of Technology Sydney,edu,6cde93a5288e84671a7bee98cf6c94037f42da42,citation,,Image-Image Domain Adaptation with Preserved Self-Similarity and Domain-Dissimilarity for Person Re-identification,2018 +165,China,Duke MTMC,duke_mtmc,39.0607286,117.1256421,Tianjin Normal University,edu,67289bd3b7c9406429c6012eb7292305e50dff0b,citation,,Integration Convolutional Neural Network for Person Re-Identification in Camera Networks,2018 +166,China,Duke MTMC,duke_mtmc,32.05765485,118.7550004,HoHai University,edu,fedb656c45aa332cfc373b413f3000b6228eee08,citation,,Joint Learning of Body and Part Representation for Person Re-Identification,2018 +167,China,Duke MTMC,duke_mtmc,33.5491006,119.035706,"Huaiyin Institute of Technology, Huaian, China",edu,fedb656c45aa332cfc373b413f3000b6228eee08,citation,,Joint Learning of Body and Part Representation for Person Re-Identification,2018 +168,Australia,Duke MTMC,duke_mtmc,-33.8809651,151.20107299,University of Technology Sydney,edu,fedb656c45aa332cfc373b413f3000b6228eee08,citation,,Joint Learning of Body and Part Representation for Person Re-Identification,2018 +169,China,Duke MTMC,duke_mtmc,23.09461185,113.28788994,Sun Yat-Sen University,edu,b37538f9364252eec4182bdbb80ef1e4614c3acd,citation,,Learning a Semantically Discriminative Joint Space for Attribute Based Person Re-identification,2017 +170,United Kingdom,Duke MTMC,duke_mtmc,51.5247272,-0.03931035,Queen Mary University of London,edu,004acfec16c36649408c561faa102dd9de76f085,citation,,Multi-level Factorisation Net for Person Re-identification,2018 +171,United Kingdom,Duke MTMC,duke_mtmc,55.94951105,-3.19534913,University of Edinburgh,edu,004acfec16c36649408c561faa102dd9de76f085,citation,,Multi-level Factorisation Net for Person Re-identification,2018 +172,China,Duke MTMC,duke_mtmc,39.0607286,117.1256421,Tianjin Normal University,edu,a80d8506fa28334c947989ca153b70aafc63ac7f,citation,,Pedestrian Retrieval via Part-Based Gradation Regularization in Sensor Networks,2018 +173,United States,Duke 
MTMC,duke_mtmc,35.9990522,-78.9290629,Duke University,edu,96e77135e745385e87fdd0f7ced951bf1fe9a756,citation,,People Tracking and Re-Identification from Multiple Cameras,2018 +174,China,Duke MTMC,duke_mtmc,30.274084,120.15507,Alibaba,company,90c18409b7a3be2cd6da599d02accba4c769e94e,citation,,Person Re-identification with Cascaded Pairwise Convolutions,2018 +175,China,Duke MTMC,duke_mtmc,31.83907195,117.26420748,University of Science and Technology of China,edu,90c18409b7a3be2cd6da599d02accba4c769e94e,citation,,Person Re-identification with Cascaded Pairwise Convolutions,2018 +176,China,Duke MTMC,duke_mtmc,30.5360485,114.3643219,"Wuhan University, Wuhan, China",edu,90c18409b7a3be2cd6da599d02accba4c769e94e,citation,,Person Re-identification with Cascaded Pairwise Convolutions,2018 +177,China,Duke MTMC,duke_mtmc,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,df4ed9983f7114ca4f0ab71f1476c0bf7521e317,citation,,Pose Transferrable Person Re-identification,2018 +178,United States,Duke MTMC,duke_mtmc,40.4441619,-79.94272826,Carnegie Mellon University,edu,e307c6635472d3d1e512af6e20f2e56c95937bb7,citation,,Semi-Supervised Bayesian Attribute Learning for Person Re-Identification,2018 +179,Australia,Duke MTMC,duke_mtmc,-33.8809651,151.20107299,University of Technology Sydney,edu,e307c6635472d3d1e512af6e20f2e56c95937bb7,citation,,Semi-Supervised Bayesian Attribute Learning for Person Re-Identification,2018 diff --git a/site/datasets/verified/erce.csv b/site/datasets/verified/erce.csv new file mode 100644 index 00000000..c7594437 --- /dev/null +++ b/site/datasets/verified/erce.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,ERCe,erce,0.0,0.0,,,,main,,Video Synopsis by Heterogeneous Multi-source Correlation,2013 diff --git a/site/datasets/verified/expw.csv b/site/datasets/verified/expw.csv new file mode 100644 index 00000000..bdff0ca8 --- /dev/null +++ b/site/datasets/verified/expw.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,ExpW,expw,0.0,0.0,,,,main,,From Facial Expression Recognition to Interpersonal Relation Prediction,2017 diff --git a/site/datasets/verified/face_scrub.csv b/site/datasets/verified/face_scrub.csv new file mode 100644 index 00000000..9270f6c9 --- /dev/null +++ b/site/datasets/verified/face_scrub.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,FaceScrub,face_scrub,0.0,0.0,,,,main,,A data-driven approach to cleaning large face datasets,2014 diff --git a/site/datasets/verified/face_tracer.csv b/site/datasets/verified/face_tracer.csv new file mode 100644 index 00000000..7d9a786c --- /dev/null +++ b/site/datasets/verified/face_tracer.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,FaceTracer,face_tracer,0.0,0.0,,,,main,,FaceTracer: A Search Engine for Large Collections of Images with Faces,2008 diff --git a/site/datasets/verified/facebook_100.csv b/site/datasets/verified/facebook_100.csv new file mode 100644 index 00000000..7ac8bed2 --- /dev/null +++ b/site/datasets/verified/facebook_100.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,Facebook100,facebook_100,0.0,0.0,,,,main,,Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook,2011 diff --git a/site/datasets/verified/families_in_the_wild.csv 
b/site/datasets/verified/families_in_the_wild.csv new file mode 100644 index 00000000..f7759a01 --- /dev/null +++ b/site/datasets/verified/families_in_the_wild.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,FIW,families_in_the_wild,0.0,0.0,,,,main,,Visual Kinship Recognition of Families in the Wild,2018 diff --git a/site/datasets/verified/fddb.csv b/site/datasets/verified/fddb.csv new file mode 100644 index 00000000..fad365aa --- /dev/null +++ b/site/datasets/verified/fddb.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,FDDB,fddb,0.0,0.0,,,,main,,FDDB: A benchmark for face detection in unconstrained settings,2010 diff --git a/site/datasets/verified/feret.csv b/site/datasets/verified/feret.csv new file mode 100644 index 00000000..9259d34e --- /dev/null +++ b/site/datasets/verified/feret.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,FERET,feret,0.0,0.0,,,,main,,The FERET Verification Testing Protocol for Face Recognition Algorithms,1998 diff --git a/site/datasets/verified/fiw_300.csv b/site/datasets/verified/fiw_300.csv new file mode 100644 index 00000000..afcd74c1 --- /dev/null +++ b/site/datasets/verified/fiw_300.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,300-W,fiw_300,0.0,0.0,,,,main,,A Semi-automatic Methodology for Facial Landmark Annotation,2013 diff --git a/site/datasets/verified/frgc.csv b/site/datasets/verified/frgc.csv new file mode 100644 index 00000000..1941ce0e --- /dev/null +++ b/site/datasets/verified/frgc.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,FRGC,frgc,0.0,0.0,,,,main,,Overview of the face recognition grand challenge,2005 diff --git a/site/datasets/verified/gallagher.csv b/site/datasets/verified/gallagher.csv new file mode 100644 index 00000000..60f38cab --- /dev/null +++ b/site/datasets/verified/gallagher.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,Gallagher,gallagher,0.0,0.0,,,,main,,Clothing cosegmentation for recognizing people,2008 diff --git a/site/datasets/verified/geofaces.csv b/site/datasets/verified/geofaces.csv new file mode 100644 index 00000000..9331c186 --- /dev/null +++ b/site/datasets/verified/geofaces.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,GeoFaces,geofaces,0.0,0.0,,,,main,,Exploring the geo-dependence of human face appearance,2014 diff --git a/site/datasets/verified/gfw.csv b/site/datasets/verified/gfw.csv new file mode 100644 index 00000000..5acd8bf1 --- /dev/null +++ b/site/datasets/verified/gfw.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,Grouping Face in the Wild,gfw,0.0,0.0,,,,main,,Merge or Not? 
Learning to Group Faces via Imitation Learning,2018 diff --git a/site/datasets/verified/helen.csv b/site/datasets/verified/helen.csv new file mode 100644 index 00000000..a9f9a846 --- /dev/null +++ b/site/datasets/verified/helen.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,Helen,helen,0.0,0.0,,,,main,,Interactive Facial Feature Localization,2012 diff --git a/site/datasets/verified/hipsterwars.csv b/site/datasets/verified/hipsterwars.csv new file mode 100644 index 00000000..7d6bd213 --- /dev/null +++ b/site/datasets/verified/hipsterwars.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,Hipsterwars,hipsterwars,0.0,0.0,,,,main,,Hipster Wars: Discovering Elements of Fashion Styles,2014 diff --git a/site/datasets/verified/hrt_transgender.csv b/site/datasets/verified/hrt_transgender.csv new file mode 100644 index 00000000..76cb4c41 --- /dev/null +++ b/site/datasets/verified/hrt_transgender.csv @@ -0,0 +1,6 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,HRT Transgender,hrt_transgender,0.0,0.0,,,,main,,Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset,2013 +1,United States,HRT Transgender,hrt_transgender,34.2249827,-77.86907744,University of North Carolina at Wilmington,edu,2f43b614607163abf41dfe5d17ef6749a1b61304,citation,,Investigating the Periocular-Based Face Recognition Across Gender Transformation,2014 +2,India,HRT Transgender,hrt_transgender,17.4454957,78.34854698,International Institute of Information Technology,edu,e6d46d923f201da644ae8d8bd04721dd9ac0e73d,citation,,Robust transgender face recognition: Approach based on appearance and therapy factors,2016 +3,Norway,HRT Transgender,hrt_transgender,60.7897318,10.6821927,"Norwegian Biometrics Laboratory, NTNU, Norway",edu,e6d46d923f201da644ae8d8bd04721dd9ac0e73d,citation,,Robust transgender face recognition: Approach based on appearance and therapy factors,2016 +4,Sweden,HRT Transgender,hrt_transgender,56.66340325,12.87929727,Halmstad University,edu,555f75077a02f33a05841f9b63a1388ec5fbcba5,citation,https://arxiv.org/pdf/1810.03360.pdf,A Survey on Periocular Biometrics Research,2016 diff --git a/site/datasets/verified/ibm_dif.csv b/site/datasets/verified/ibm_dif.csv new file mode 100644 index 00000000..4a78dc92 --- /dev/null +++ b/site/datasets/verified/ibm_dif.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,IBM Diversity in Faces,ibm_dif,0.0,0.0,,,,main,,Facial Coding Scheme Reference 1 Craniofacial Distances,2019 diff --git a/site/datasets/verified/ifad.csv b/site/datasets/verified/ifad.csv new file mode 100644 index 00000000..ca30c779 --- /dev/null +++ b/site/datasets/verified/ifad.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,IFAD,ifad,0.0,0.0,,,,main,,Indian Face Age Database: A Database for Face Recognition with Age Variation,2015 diff --git a/site/datasets/verified/ifdb.csv b/site/datasets/verified/ifdb.csv new file mode 100644 index 00000000..5d7eb156 --- /dev/null +++ b/site/datasets/verified/ifdb.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,IFDB,ifdb,0.0,0.0,,,,main,,Iranian Face Database and Evaluation with a New Detection Algorithm,2007 diff --git a/site/datasets/verified/ijb_a.csv 
b/site/datasets/verified/ijb_a.csv new file mode 100644 index 00000000..f3abe597 --- /dev/null +++ b/site/datasets/verified/ijb_a.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,IJB-A,ijb_a,0.0,0.0,,,,main,,Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A,2015 diff --git a/site/datasets/verified/ijb_b.csv b/site/datasets/verified/ijb_b.csv new file mode 100644 index 00000000..6a78ed81 --- /dev/null +++ b/site/datasets/verified/ijb_b.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,IJB-B,ijb_b,0.0,0.0,,,,main,,IARPA Janus Benchmark-B Face Dataset,2017 diff --git a/site/datasets/verified/ijb_c.csv b/site/datasets/verified/ijb_c.csv new file mode 100644 index 00000000..4b8c251d --- /dev/null +++ b/site/datasets/verified/ijb_c.csv @@ -0,0 +1,6 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,IJB-C,ijb_c,0.0,0.0,,,,main,,IARPA Janus Benchmark - C: Face Dataset and Protocol,2018 +1,United Kingdom,IJB-C,ijb_c,51.7520849,-1.2516646,Oxford University,edu,9286eab328444401a848cd2e13186840be8f0409,citation,https://arxiv.org/pdf/1807.09192.pdf,Multicolumn Networks for Face Recognition,2018 +2,United Kingdom,IJB-C,ijb_c,51.7520849,-1.2516646,Oxford University,edu,ac5ab8f71edde6d1a2129da12d051ed03a8446a1,citation,https://arxiv.org/pdf/1807.11440.pdf,Comparator Networks,2018 +3,United States,IJB-C,ijb_c,29.7207902,-95.34406271,University of Houston,edu,3b3941524d97e7f778367a1250ba1efb9205d5fc,citation,https://arxiv.org/pdf/1901.09447.pdf,Open Source Face Recognition Performance Evaluation Package,2019 +4,United States,IJB-C,ijb_c,42.718568,-84.47791571,Michigan State University,edu,fa03cac5aa5192822a85273852090ca20a6c47aa,citation,https://arxiv.org/pdf/1805.00611.pdf,Towards Interpretable Face Recognition,2018 diff --git a/site/datasets/verified/ilids_mcts_vid.csv b/site/datasets/verified/ilids_mcts_vid.csv new file mode 100644 index 00000000..a8c49b3e --- /dev/null +++ b/site/datasets/verified/ilids_mcts_vid.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,iLIDS-VID,ilids_mcts_vid,0.0,0.0,,,,main,,Person Re-identification by Video Ranking,2014 diff --git a/site/datasets/verified/images_of_groups.csv b/site/datasets/verified/images_of_groups.csv new file mode 100644 index 00000000..cb1ca5b7 --- /dev/null +++ b/site/datasets/verified/images_of_groups.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,Images of Groups,images_of_groups,0.0,0.0,,,,main,,Understanding images of groups of people,2009 diff --git a/site/datasets/verified/imdb_face.csv b/site/datasets/verified/imdb_face.csv new file mode 100644 index 00000000..57609d4b --- /dev/null +++ b/site/datasets/verified/imdb_face.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,IMDb Face,imdb_face,0.0,0.0,,,,main,,The Devil of Face Recognition is in the Noise,2018 diff --git a/site/datasets/verified/imdb_wiki.csv b/site/datasets/verified/imdb_wiki.csv new file mode 100644 index 00000000..913f9f8d --- /dev/null +++ b/site/datasets/verified/imdb_wiki.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,IMDB,imdb_wiki,0.0,0.0,,,,main,,Deep Expectation of Real and Apparent Age from a 
Single Image Without Facial Landmarks,2016 diff --git a/site/datasets/verified/imfdb.csv b/site/datasets/verified/imfdb.csv new file mode 100644 index 00000000..d82b1665 --- /dev/null +++ b/site/datasets/verified/imfdb.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,IMFDB,imfdb,0.0,0.0,,,,main,,Indian Movie Face Database: A benchmark for face recognition under wide variations,2013 diff --git a/site/datasets/verified/kin_face.csv b/site/datasets/verified/kin_face.csv new file mode 100644 index 00000000..5f1a02c6 --- /dev/null +++ b/site/datasets/verified/kin_face.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,UB KinFace,kin_face,0.0,0.0,,,,main,,Understanding Kin Relationships in a Photo,2012 diff --git a/site/datasets/verified/lag.csv b/site/datasets/verified/lag.csv new file mode 100644 index 00000000..7021aad2 --- /dev/null +++ b/site/datasets/verified/lag.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,LAG,lag,0.0,0.0,,,,main,,Large age-gap face verification by feature injection in deep networks,2017 diff --git a/site/datasets/verified/laofiw.csv b/site/datasets/verified/laofiw.csv new file mode 100644 index 00000000..9bcabfed --- /dev/null +++ b/site/datasets/verified/laofiw.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,LAOFIW,laofiw,0.0,0.0,,,,main,,Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings,2018 diff --git a/site/datasets/verified/lfpw.csv b/site/datasets/verified/lfpw.csv new file mode 100644 index 00000000..a2b6a265 --- /dev/null +++ b/site/datasets/verified/lfpw.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,LFPW,lfpw,0.0,0.0,,,,main,,Localizing Parts of Faces Using a Consensus of Exemplars,2011 diff --git a/site/datasets/verified/lfw.csv b/site/datasets/verified/lfw.csv new file mode 100644 index 00000000..e22ec3c1 --- /dev/null +++ b/site/datasets/verified/lfw.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,LFW,lfw,0.0,0.0,,,,main,,Labeled Faces in the Wild: Updates and New Reporting Procedures,2014 diff --git a/site/datasets/verified/market_1501.csv b/site/datasets/verified/market_1501.csv new file mode 100644 index 00000000..8561b33f --- /dev/null +++ b/site/datasets/verified/market_1501.csv @@ -0,0 +1,177 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,Market 1501,market_1501,0.0,0.0,,,,main,,Scalable Person Re-identification: A Benchmark,2015 +1,China,Market 1501,market_1501,31.83907195,117.26420748,University of Science and Technology of China,edu,5b309f6d98c503efb679eda51bd898543fb746f9,citation,https://arxiv.org/pdf/1809.05864.pdf,In Defense of the Classification Loss for Person Re-Identification,2018 +2,United States,Market 1501,market_1501,42.3614256,-71.0812092,Microsoft Research Asia,company,5b309f6d98c503efb679eda51bd898543fb746f9,citation,https://arxiv.org/pdf/1809.05864.pdf,In Defense of the Classification Loss for Person Re-Identification,2018 +3,United States,Market 1501,market_1501,39.2899685,-76.62196103,University of Maryland,edu,fe3f8826f615cc5ada33b01777b9f9dc93e0023c,citation,https://arxiv.org/pdf/1901.07702.pdf,Exploring Uncertainty in Conditional 
Multi-Modal Retrieval Systems,2019 +4,China,Market 1501,market_1501,24.4399419,118.09301781,Xiamen University,edu,d95ce873ed42b7c7facaa4c1e9c72b57b4e279f6,citation,https://pdfs.semanticscholar.org/d95c/e873ed42b7c7facaa4c1e9c72b57b4e279f6.pdf,Generalizing a Person Retrieval Model Hetero- and Homogeneously,2018 +5,Australia,Market 1501,market_1501,-33.8809651,151.20107299,University of Technology Sydney,edu,d95ce873ed42b7c7facaa4c1e9c72b57b4e279f6,citation,https://pdfs.semanticscholar.org/d95c/e873ed42b7c7facaa4c1e9c72b57b4e279f6.pdf,Generalizing a Person Retrieval Model Hetero- and Homogeneously,2018 +6,Australia,Market 1501,market_1501,-35.2776999,149.118527,Australian National University,edu,d95ce873ed42b7c7facaa4c1e9c72b57b4e279f6,citation,https://pdfs.semanticscholar.org/d95c/e873ed42b7c7facaa4c1e9c72b57b4e279f6.pdf,Generalizing a Person Retrieval Model Hetero- and Homogeneously,2018 +7,China,Market 1501,market_1501,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,927ec8dde9eb0e3bc5bf0b1a0ae57f9cf745fd9c,citation,https://arxiv.org/pdf/1804.01438.pdf,Learning Discriminative Features with Multiple Granularities for Person Re-Identification,2018 +8,China,Market 1501,market_1501,31.83907195,117.26420748,University of Science and Technology of China,edu,04ca65f1454f1014ef5af5bfafb7aee576ee1be6,citation,https://arxiv.org/pdf/1812.08967.pdf,Densely Semantically Aligned Person Re-Identification,2018 +9,United States,Market 1501,market_1501,42.3614256,-71.0812092,Microsoft Research Asia,company,04ca65f1454f1014ef5af5bfafb7aee576ee1be6,citation,https://arxiv.org/pdf/1812.08967.pdf,Densely Semantically Aligned Person Re-Identification,2018 +10,China,Market 1501,market_1501,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,7daa2c0f76fd3bfc7feadf313d6ac7504d4ecd20,citation,https://arxiv.org/pdf/1803.09937.pdf,Dual Attention Matching Network for Context-Aware Feature Sequence Based Person Re-identification,2018 +11,Singapore,Market 1501,market_1501,1.3484104,103.68297965,Nanyang Technological University,edu,7daa2c0f76fd3bfc7feadf313d6ac7504d4ecd20,citation,https://arxiv.org/pdf/1803.09937.pdf,Dual Attention Matching Network for Context-Aware Feature Sequence Based Person Re-identification,2018 +12,China,Market 1501,market_1501,32.0565957,118.77408833,Nanjing University,edu,08b28a8f2699501d46d87956cbaa37255000daa3,citation,https://arxiv.org/pdf/1804.03864.pdf,MaskReID: A Mask Based Deep Ranking Neural Network for Person Re-identification,2018 +13,Australia,Market 1501,market_1501,-34.40505545,150.87834655,University of Wollongong,edu,08b28a8f2699501d46d87956cbaa37255000daa3,citation,https://arxiv.org/pdf/1804.03864.pdf,MaskReID: A Mask Based Deep Ranking Neural Network for Person Re-identification,2018 +14,United Kingdom,Market 1501,market_1501,51.5247272,-0.03931035,Queen Mary University of London,edu,baf5ab5e8972e9366951b7e66951e05e2a4b3e36,citation,https://arxiv.org/pdf/1802.08122.pdf,Harmonious Attention Network for Person Re-identification,2018 +15,United Kingdom,Market 1501,market_1501,52.3793131,-1.5604252,University of Warwick,edu,124d60fae338b1f87455d1fc4ede5fcfd806da1a,citation,https://arxiv.org/pdf/1807.01440.pdf,Multi-task Mid-level Feature Alignment Network for Unsupervised Cross-Dataset Person Re-Identification,2018 +16,Singapore,Market 1501,market_1501,1.3484104,103.68297965,Nanyang Technological University,edu,124d60fae338b1f87455d1fc4ede5fcfd806da1a,citation,https://arxiv.org/pdf/1807.01440.pdf,Multi-task Mid-level Feature Alignment Network for 
Unsupervised Cross-Dataset Person Re-Identification,2018 +17,Australia,Market 1501,market_1501,-35.0636071,147.3552234,Charles Sturt University,edu,124d60fae338b1f87455d1fc4ede5fcfd806da1a,citation,https://arxiv.org/pdf/1807.01440.pdf,Multi-task Mid-level Feature Alignment Network for Unsupervised Cross-Dataset Person Re-Identification,2018 +18,United States,Market 1501,market_1501,33.776033,-84.39884086,Georgia Institute of Technology,edu,45a44e61236f7c144d9ec11561e236b2960c7cf6,citation,https://pdfs.semanticscholar.org/4eb8/4fd65703fc92863f9f589e3a07e6c841f7c4.pdf,Multi-object Tracking with Neural Gating Using Bilinear LSTM,2018 +19,United States,Market 1501,market_1501,45.5198289,-122.67797964,Oregon State University,edu,45a44e61236f7c144d9ec11561e236b2960c7cf6,citation,https://pdfs.semanticscholar.org/4eb8/4fd65703fc92863f9f589e3a07e6c841f7c4.pdf,Multi-object Tracking with Neural Gating Using Bilinear LSTM,2018 +20,China,Market 1501,market_1501,34.1235825,108.83546,Xidian University,edu,55355b0317f6e0c5218887441de71f05da4b42f6,citation,https://arxiv.org/pdf/1811.12150.pdf,Parameter-Free Spatial Attention Network for Person Re-Identification,2018 +21,Germany,Market 1501,market_1501,49.2579566,7.04577417,Max Planck Institute for Informatics,edu,55355b0317f6e0c5218887441de71f05da4b42f6,citation,https://arxiv.org/pdf/1811.12150.pdf,Parameter-Free Spatial Attention Network for Person Re-Identification,2018 +22,China,Market 1501,market_1501,31.2284923,121.40211389,East China Normal University,edu,e1af55ad7bb26e5e1acde3ec6c5c43cffe884b04,citation,https://pdfs.semanticscholar.org/e1af/55ad7bb26e5e1acde3ec6c5c43cffe884b04.pdf,Person Re-identification by Mid-level Attribute and Part-based Identity Learning,2018 +23,Australia,Market 1501,market_1501,-35.2776999,149.118527,Australian National University,edu,c66350cbdee8c6873cc99807d342e932594aa0b9,citation,https://arxiv.org/pdf/1812.02162.pdf,Dissecting Person Re-identification from the Viewpoint of Viewpoint,2018 +24,Brazil,Market 1501,market_1501,-27.5953995,-48.6154218,University of Campinas,edu,b986a535e45751cef684a30631a74476e911a749,citation,https://arxiv.org/pdf/1807.05618.pdf,Improved Person Re-Identification Based on Saliency and Semantic Parsing with Deep Neural Network Models,2018 +25,South Korea,Market 1501,market_1501,37.26728,126.9841151,Seoul National University,edu,315df9b7dd354ae78ddf1049fb428b086eee632c,citation,https://arxiv.org/pdf/1804.07094.pdf,Part-Aligned Bilinear Representations for Person Re-identification,2018 +26,Germany,Market 1501,market_1501,48.7468939,9.0805141,Max Planck Institute for Intelligent Systems,edu,315df9b7dd354ae78ddf1049fb428b086eee632c,citation,https://arxiv.org/pdf/1804.07094.pdf,Part-Aligned Bilinear Representations for Person Re-identification,2018 +27,United States,Market 1501,market_1501,47.6423318,-122.1369302,Microsoft,company,315df9b7dd354ae78ddf1049fb428b086eee632c,citation,https://arxiv.org/pdf/1804.07094.pdf,Part-Aligned Bilinear Representations for Person Re-identification,2018 +28,Australia,Market 1501,market_1501,-33.8809651,151.20107299,University of Technology Sydney,edu,7f23a4bb0c777dd72cca7665a5f370ac7980217e,citation,https://arxiv.org/pdf/1703.07220.pdf,Improving Person Re-identification by Attribute and Identity Learning,2017 +29,United States,Market 1501,market_1501,40.1019523,-88.2271615,UIUC,edu,cc78e3f1e531342f639e4a1fc8107a7a778ae1cf,citation,https://arxiv.org/pdf/1811.10144.pdf,One Shot Domain Adaptation for Person Re-Identification,2018 +30,China,Market 
1501,market_1501,22.053565,113.39913285,Jilin University,edu,4abf902cefca527f707e4f76dd4e14fcd5d47361,citation,https://arxiv.org/pdf/1811.11510.pdf,Identity Preserving Generative Adversarial Network for Cross-Domain Person Re-identification,2018 +31,China,Market 1501,market_1501,32.0565957,118.77408833,Nanjing University,edu,088e7b24bd1cf6e5922ae6c80d37439e05fadce9,citation,https://arxiv.org/pdf/1711.07155.pdf,Let Features Decide for Themselves: Feature Mask Network for Person Re-identification,2017 +32,China,Market 1501,market_1501,22.4162632,114.2109318,Chinese University of Hong Kong,edu,4f8e06ac894e9cc1eb1617a293e43448930c7d4f,citation,https://arxiv.org/pdf/1810.02936.pdf,FD-GAN: Pose-guided Feature Distilling GAN for Robust Person Re-identification,2018 +33,China,Market 1501,market_1501,39.993008,116.329882,SenseTime,company,4f8e06ac894e9cc1eb1617a293e43448930c7d4f,citation,https://arxiv.org/pdf/1810.02936.pdf,FD-GAN: Pose-guided Feature Distilling GAN for Robust Person Re-identification,2018 +34,United States,Market 1501,market_1501,39.3299013,-76.6205177,Johns Hopkins University,edu,4f8e06ac894e9cc1eb1617a293e43448930c7d4f,citation,https://arxiv.org/pdf/1810.02936.pdf,FD-GAN: Pose-guided Feature Distilling GAN for Robust Person Re-identification,2018 +35,China,Market 1501,market_1501,31.83907195,117.26420748,University of Science and Technology of China,edu,4f8e06ac894e9cc1eb1617a293e43448930c7d4f,citation,https://arxiv.org/pdf/1810.02936.pdf,FD-GAN: Pose-guided Feature Distilling GAN for Robust Person Re-identification,2018 +36,China,Market 1501,market_1501,30.19331415,120.11930822,Zhejiang University,edu,84984c7201a7e5bc8ef4c01f0a7cfbe08c2c523b,citation,https://arxiv.org/pdf/1804.06964.pdf,GNAS: A Greedy Neural Architecture Search Method for Multi-Attribute Learning,2018 +37,China,Market 1501,market_1501,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,c753521ba6fb06c12369d6fff814bb704c682ef5,citation,https://pdfs.semanticscholar.org/c753/521ba6fb06c12369d6fff814bb704c682ef5.pdf,Mancs: A Multi-task Attentional Network with Curriculum Sampling for Person Re-Identification,2018 +38,China,Market 1501,market_1501,22.4162632,114.2109318,Chinese University of Hong Kong,edu,0a808a17f5c86413bd552a324ee6ba180a12f46d,citation,https://arxiv.org/pdf/1808.01571.pdf,Improving Deep Visual Representation for Person Re-identification by Global and Local Image-language Association,2018 +39,China,Market 1501,market_1501,39.993008,116.329882,SenseTime,company,0a808a17f5c86413bd552a324ee6ba180a12f46d,citation,https://arxiv.org/pdf/1808.01571.pdf,Improving Deep Visual Representation for Person Re-identification by Global and Local Image-language Association,2018 +40,China,Market 1501,market_1501,34.250803,108.983693,Xi’an Jiaotong University,edu,0a808a17f5c86413bd552a324ee6ba180a12f46d,citation,https://arxiv.org/pdf/1808.01571.pdf,Improving Deep Visual Representation for Person Re-identification by Global and Local Image-language Association,2018 +41,Germany,Market 1501,market_1501,48.7468939,9.0805141,"Max Planck Institute for Intelligent Systems, Tübingen",edu,9db841848aa96f60e765299de4cce7abe5ccb47d,citation,http://openaccess.thecvf.com/content_cvpr_2017/papers/Tang_Multiple_People_Tracking_CVPR_2017_paper.pdf,Multiple People Tracking by Lifted Multicut and Person Re-identification,2017 +42,Germany,Market 1501,market_1501,49.2578657,7.0457956,"Max-Planck-Institut für Informatik, Saarbrücken, 
Germany",edu,9db841848aa96f60e765299de4cce7abe5ccb47d,citation,http://openaccess.thecvf.com/content_cvpr_2017/papers/Tang_Multiple_People_Tracking_CVPR_2017_paper.pdf,Multiple People Tracking by Lifted Multicut and Person Re-identification,2017 +43,France,Market 1501,market_1501,48.8457981,2.3567236,Pierre and Marie Curie University,edu,231a12de5dedddf1184ae9caafbc4a954ce584c3,citation,https://pdfs.semanticscholar.org/231a/12de5dedddf1184ae9caafbc4a954ce584c3.pdf,Closed and Open World Multi-shot Person Re-identification. (Ré-identification de personnes à partir de multiples images dans le cadre de bases d'identités fermées et ouvertes),2017 +44,China,Market 1501,market_1501,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,07dead6b98379faac1cf0b2cb34a5db842ab9de9,citation,https://arxiv.org/pdf/1711.10658.pdf,Deep-Person: Learning Discriminative Deep Features for Person Re-Identification,2017 +45,Canada,Market 1501,market_1501,46.7817463,-71.2747424,Université Laval,edu,a743127b44397b7a017a65a7ad52d0d7ccb4db93,citation,https://arxiv.org/pdf/1804.10094.pdf,Domain Adaptation Through Synthesis for Unsupervised Person Re-identification,2018 +46,Australia,Market 1501,market_1501,-35.2776999,149.118527,Australian National University,edu,12d62f1360587fdecee728e6c509acc378f38dc9,citation,https://arxiv.org/pdf/1805.06118.pdf,Feature Affinity based Pseudo Labeling for Semi-supervised Person Re-identification,2018 +47,China,Market 1501,market_1501,32.20541,118.726956,Nanjing University of Information Science & Technology,edu,12d62f1360587fdecee728e6c509acc378f38dc9,citation,https://arxiv.org/pdf/1805.06118.pdf,Feature Affinity based Pseudo Labeling for Semi-supervised Person Re-identification,2018 +48,Australia,Market 1501,market_1501,-33.8809651,151.20107299,University of Technology Sydney,edu,12d62f1360587fdecee728e6c509acc378f38dc9,citation,https://arxiv.org/pdf/1805.06118.pdf,Feature Affinity based Pseudo Labeling for Semi-supervised Person Re-identification,2018 +49,China,Market 1501,market_1501,40.0044795,116.370238,Chinese Academy of Sciences,edu,14b3a7aa61c15fd9cab0a4d8bc2a205a89fb572e,citation,https://arxiv.org/pdf/1807.11206.pdf,Hard-Aware Point-to-Set Deep Metric for Person Re-identification,2018 +50,China,Market 1501,market_1501,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,14b3a7aa61c15fd9cab0a4d8bc2a205a89fb572e,citation,https://arxiv.org/pdf/1807.11206.pdf,Hard-Aware Point-to-Set Deep Metric for Person Re-identification,2018 +51,China,Market 1501,market_1501,22.304572,114.17976285,Hong Kong Polytechnic University,edu,fea0895326b663bf72be89151a751362db8ae881,citation,https://arxiv.org/pdf/1804.08866.pdf,Homocentric Hypersphere Feature Embedding for Person Re-identification,2018 +52,China,Market 1501,market_1501,22.4162632,114.2109318,Chinese University of Hong Kong,edu,0c769c19d894e0dbd6eb314781dc1db3c626df57,citation,https://arxiv.org/pdf/1604.01850.pdf,Joint Detection and Identification Feature Learning for Person Search,2017 +53,China,Market 1501,market_1501,39.993008,116.329882,SenseTime,company,0c769c19d894e0dbd6eb314781dc1db3c626df57,citation,https://arxiv.org/pdf/1604.01850.pdf,Joint Detection and Identification Feature Learning for Person Search,2017 +54,China,Market 1501,market_1501,23.09461185,113.28788994,Sun Yat-Sen University,edu,0c769c19d894e0dbd6eb314781dc1db3c626df57,citation,https://arxiv.org/pdf/1604.01850.pdf,Joint Detection and Identification Feature Learning for Person Search,2017 +55,China,Market 
1501,market_1501,30.209484,120.220912,"Hikvision Digital Technology Co., Ltd.",company,ed3991046e6dfba0c5cebdbbe914cc3aa06d0235,citation,https://arxiv.org/pdf/1812.06576.pdf,Learning Incremental Triplet Margin for Person Re-identification,2019 +56,China,Market 1501,market_1501,24.4399419,118.09301781,Xiamen University,edu,e746447afc4898713a0bcf2bb560286eb4d20019,citation,https://arxiv.org/pdf/1811.02074.pdf,Leveraging Virtual and Real Person for Unsupervised Person Re-identification,2018 +57,United States,Market 1501,market_1501,40.4441619,-79.94272826,Carnegie Mellon University,edu,76fb9e2963928bf8e940944d45c13d52db947702,citation,https://arxiv.org/pdf/1710.00478.pdf,Margin Sample Mining Loss: A Deep Learning Based Method for Person Re-identification,2017 +58,China,Market 1501,market_1501,30.19331415,120.11930822,Zhejiang University,edu,76fb9e2963928bf8e940944d45c13d52db947702,citation,https://arxiv.org/pdf/1710.00478.pdf,Margin Sample Mining Loss: A Deep Learning Based Method for Person Re-identification,2017 +59,Italy,Market 1501,market_1501,45.434532,12.326197,"DAIS, Università Ca’ Foscari, Venice, Italy",edu,bee609ea6e71aba9b449731242efdb136d556222,citation,https://arxiv.org/pdf/1706.06196.pdf,Multi-Target Tracking in Multiple Non-Overlapping Cameras using Constrained Dominant Sets,2017 +60,Italy,Market 1501,market_1501,45.4377672,12.321807,University Iuav of Venice,edu,bee609ea6e71aba9b449731242efdb136d556222,citation,https://arxiv.org/pdf/1706.06196.pdf,Multi-Target Tracking in Multiple Non-Overlapping Cameras using Constrained Dominant Sets,2017 +61,India,Market 1501,market_1501,13.0222347,77.56718325,Indian Institute of Science Bangalore,edu,317f5a56519df95884cce81cfba180ee3adaf5a5,citation,https://arxiv.org/pdf/1807.07295.pdf,Operator-In-The-Loop Deep Sequential Multi-camera Feature Fusion for Person Re-identification,2018 +62,Spain,Market 1501,market_1501,41.5007811,2.11143663,Universitat Autònoma de Barcelona,edu,388b03244e7cdf28c750d7f6d4b4eb64219c3e7a,citation,https://arxiv.org/pdf/1812.02937.pdf,Optimizing Speed/Accuracy Trade-Off for Person Re-identification via Knowledge Distillation,2018 +63,China,Market 1501,market_1501,39.10041,121.821932,Dalian University,edu,ae5983048e59a339c77fee89e9279a4a787ba985,citation,https://arxiv.org/pdf/1705.02145.pdf,Part-Based Deep Hashing for Large-Scale Person Re-Identification,2017 +64,Australia,Market 1501,market_1501,-33.8809651,151.20107299,University of Technology Sydney,edu,ae5983048e59a339c77fee89e9279a4a787ba985,citation,https://arxiv.org/pdf/1705.02145.pdf,Part-Based Deep Hashing for Large-Scale Person Re-Identification,2017 +65,United States,Market 1501,market_1501,29.58333105,-98.61944505,University of Texas at San Antonio,edu,ae5983048e59a339c77fee89e9279a4a787ba985,citation,https://arxiv.org/pdf/1705.02145.pdf,Part-Based Deep Hashing for Large-Scale Person Re-Identification,2017 +66,Germany,Market 1501,market_1501,49.10184375,8.4331256,Karlsruhe Institute of Technology,edu,9812542cae5a470ea601e7c3a871331694105093,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w17/papers/Schumann_Person_Re-Identification_by_CVPR_2017_paper.pdf,Person Re-identification by Deep Learning Attribute-Complementary Information,2017 +67,China,Market 1501,market_1501,34.250803,108.983693,Xi’an Jiaotong University,edu,e1dcc3946fa750da4bc05b1154b6321db163ad62,citation,http://gr.xjtu.edu.cn/c/document_library/get_file?folderId=1540809&name=DLFE-80365.pdf,Similarity Learning with Spatial Constraints for Person Re-identification,2016 
+68,United States,Market 1501,market_1501,42.366183,-71.092455,Mitsubishi Electric Research Laboratories,company,bb4f83458976755e9310b241a689c8d21b481238,citation,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w23/Jones_Improving_Face_Verification_ICCV_2017_paper.pdf,Improving Face Verification and Person Re-Identification Accuracy Using Hyperplane Similarity,2017 +69,United States,Market 1501,market_1501,42.3383668,-71.08793524,Northeastern University,edu,32dc3e04dea2306ec34ca3f39db27a2b0a49e0a1,citation,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w21/Gou_moM_Mean_of_ICCV_2017_paper.pdf,moM: Mean of Moments Feature for Person Re-identification,2017 +70,United States,Market 1501,market_1501,42.3383668,-71.08793524,Northeastern University,edu,0deca8c53adcc13d8da72050d9a4b638da52264b,citation,https://pdfs.semanticscholar.org/0dec/a8c53adcc13d8da72050d9a4b638da52264b.pdf,"A Comprehensive Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets",2016 +71,Australia,Market 1501,market_1501,-33.8809651,151.20107299,University of Technology Sydney,edu,193089d56758ab88391d846edd08d359b1f9a863,citation,https://arxiv.org/pdf/1611.05666.pdf,A Discriminatively Learned CNN Embedding for Person Reidentification,2017 +72,China,Market 1501,market_1501,31.821994,117.28059,"USTC, Hefei, China",edu,83c19722450e8f7dcb89dabb38265f19efafba27,citation,https://arxiv.org/pdf/1803.02983.pdf,A framework with updateable joint images re-ranking for Person Re-identification.,2018 +73,Singapore,Market 1501,market_1501,1.3484104,103.68297965,Nanyang Technological University,edu,6bb8a5f9e2ddf1bdcd42aa7212eb0499992c1e9e,citation,https://arxiv.org/pdf/1607.08381.pdf,A Siamese Long Short-Term Memory Architecture for Human Re-Identification,2016 +74,China,Market 1501,market_1501,40.00229045,116.32098908,Tsinghua University,edu,6bb8a5f9e2ddf1bdcd42aa7212eb0499992c1e9e,citation,https://arxiv.org/pdf/1607.08381.pdf,A Siamese Long Short-Term Memory Architecture for Human Re-Identification,2016 +75,Australia,Market 1501,market_1501,-33.88890695,151.18943366,University of Sydney,edu,6bb8a5f9e2ddf1bdcd42aa7212eb0499992c1e9e,citation,https://arxiv.org/pdf/1607.08381.pdf,A Siamese Long Short-Term Memory Architecture for Human Re-Identification,2016 +76,Germany,Market 1501,market_1501,49.4109266,8.6979529,Heidelberg University,edu,5fdb3533152f9862e3e4c2282cd5f1400af18956,citation,https://arxiv.org/pdf/1804.04694.pdf,A Variational U-Net for Conditional Appearance and Shape Generation,2018 +77,China,Market 1501,market_1501,23.09461185,113.28788994,Sun Yat-Sen University,edu,635efc8bddec1cf94b1ee4951e4d216331758422,citation,https://arxiv.org/pdf/1812.00914.pdf,Accelerating Large Scale Knowledge Distillation via Dynamic Importance Sampling,2018 +78,Canada,Market 1501,market_1501,53.5238572,-113.52282665,University of Alberta,edu,635efc8bddec1cf94b1ee4951e4d216331758422,citation,https://arxiv.org/pdf/1812.00914.pdf,Accelerating Large Scale Knowledge Distillation via Dynamic Importance Sampling,2018 +79,China,Market 1501,market_1501,39.9808333,116.34101249,Beihang University,edu,19be4580df2e76b70a39af6e749bf189e1ca3975,citation,https://arxiv.org/pdf/1803.10914.pdf,Adversarial Binary Coding for Efficient Person Re-identification,2018 +80,United Kingdom,Market 1501,market_1501,51.7534538,-1.25400997,University of Oxford,edu,47f4dec5f733e933c8b9a8fdcda9419741f2bf62,citation,https://arxiv.org/pdf/1901.10650.pdf,Adversarial Metric Attack for Person Re-identification,2019 +81,United 
States,Market 1501,market_1501,39.3299013,-76.6205177,Johns Hopkins University,edu,47f4dec5f733e933c8b9a8fdcda9419741f2bf62,citation,https://arxiv.org/pdf/1901.10650.pdf,Adversarial Metric Attack for Person Re-identification,2019 +82,China,Market 1501,market_1501,23.09461185,113.28788994,Sun Yat-Sen University,edu,eee4cc389ca85d23700cba9627fa11e5ee65d740,citation,https://arxiv.org/pdf/1807.10482.pdf,Adversarial Open-World Person Re-Identification,2018 +83,China,Market 1501,market_1501,23.09461185,113.28788994,Sun Yat-Sen University,edu,7969cc315bbafcd38a637eb8cd5d45ba897be319,citation,https://arxiv.org/pdf/1604.07807.pdf,An enhanced deep feature representation for person re-identification,2016 +84,China,Market 1501,market_1501,22.3874201,114.2082222,Hong Kong Baptist University,edu,c0e9d06383442d89426808d723ca04586db91747,citation,https://pdfs.semanticscholar.org/c0e9/d06383442d89426808d723ca04586db91747.pdf,Cascaded SR-GAN for Scale-Adaptive Low Resolution Person Re-identification,2018 +85,China,Market 1501,market_1501,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,c0e9d06383442d89426808d723ca04586db91747,citation,https://pdfs.semanticscholar.org/c0e9/d06383442d89426808d723ca04586db91747.pdf,Cascaded SR-GAN for Scale-Adaptive Low Resolution Person Re-identification,2018 +86,Japan,Market 1501,market_1501,35.6924853,139.7582533,"National Institute of Informatics, Japan",edu,c0e9d06383442d89426808d723ca04586db91747,citation,https://pdfs.semanticscholar.org/c0e9/d06383442d89426808d723ca04586db91747.pdf,Cascaded SR-GAN for Scale-Adaptive Low Resolution Person Re-identification,2018 +87,China,Market 1501,market_1501,40.00229045,116.32098908,Tsinghua University,edu,5e1514de6d20d3b1d148d6925edc89a6c891ce47,citation,http://openaccess.thecvf.com/content_cvpr_2017/papers/Lin_Consistent-Aware_Deep_Learning_CVPR_2017_paper.pdf,Consistent-Aware Deep Learning for Person Re-identification in a Camera Network,2017 +88,China,Market 1501,market_1501,40.0044795,116.370238,Chinese Academy of Sciences,edu,bff1e1ecf00c37ec91edc7c5c85c1390726c3687,citation,https://arxiv.org/pdf/1511.07545.pdf,Constrained Deep Metric Learning for Person Re-identification,2015 +89,China,Market 1501,market_1501,40.00229045,116.32098908,Tsinghua University,edu,6ce6da7a6b2d55fac604d986595ba6979580393b,citation,https://arxiv.org/pdf/1611.06026.pdf,Cross Domain Knowledge Transfer for Person Re-identification,2016 +90,China,Market 1501,market_1501,23.0502042,113.39880323,South China University of Technology,edu,c249f0aa1416c51bf82be5bb47cbeb8aac6dee35,citation,https://arxiv.org/pdf/1806.04533.pdf,Cross-Dataset Person Re-identification Using Similarity Preserved Generative Adversarial Networks,2018 +91,China,Market 1501,market_1501,40.00229045,116.32098908,Tsinghua University,edu,4f83ef534c164bd7fbd1e71fe6a3d09a30326b26,citation,https://arxiv.org/pdf/1810.10221.pdf,Cross-Resolution Person Re-identification with Deep Antithetical Learning,2018 +92,China,Market 1501,market_1501,28.16437,112.93251,Central South University,edu,a6bc69831dea3efc5804b8ab65cf5a06688ddae0,citation,https://arxiv.org/pdf/1801.01760.pdf,Crossing Generative Adversarial Networks for Cross-View Person Re-identification,2018 +93,Australia,Market 1501,market_1501,-27.49741805,153.01316956,University of Queensland,edu,a6bc69831dea3efc5804b8ab65cf5a06688ddae0,citation,https://arxiv.org/pdf/1801.01760.pdf,Crossing Generative Adversarial Networks for Cross-View Person Re-identification,2018 +94,Australia,Market 
1501,market_1501,-33.91758275,151.23124025,University of New South Wales,edu,a6bc69831dea3efc5804b8ab65cf5a06688ddae0,citation,https://arxiv.org/pdf/1801.01760.pdf,Crossing Generative Adversarial Networks for Cross-View Person Re-identification,2018 +95,China,Market 1501,market_1501,39.98177,116.330086,National Laboratory of Pattern Recognition,edu,34b8e675d4651db45e484da34f3c415c60ef3ea2,citation,https://arxiv.org/pdf/1707.01220.pdf,DarkRank: Accelerating Deep Metric Learning via Cross Sample Similarities Transfer,2018 +96,China,Market 1501,market_1501,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,34b8e675d4651db45e484da34f3c415c60ef3ea2,citation,https://arxiv.org/pdf/1707.01220.pdf,DarkRank: Accelerating Deep Metric Learning via Cross Sample Similarities Transfer,2018 +97,Australia,Market 1501,market_1501,-27.49741805,153.01316956,University of Queensland,edu,d1ba33106567c880bf99daba2bd31fe88df4ecba,citation,https://arxiv.org/pdf/1706.03160.pdf,Deep Adaptive Feature Embedding with Local Sample Distributions for Person Re-identification,2018 +98,Australia,Market 1501,market_1501,-33.91758275,151.23124025,University of New South Wales,edu,d1ba33106567c880bf99daba2bd31fe88df4ecba,citation,https://arxiv.org/pdf/1706.03160.pdf,Deep Adaptive Feature Embedding with Local Sample Distributions for Person Re-identification,2018 +99,Australia,Market 1501,market_1501,-33.88890695,151.18943366,University of Sydney,edu,d1ba33106567c880bf99daba2bd31fe88df4ecba,citation,https://arxiv.org/pdf/1706.03160.pdf,Deep Adaptive Feature Embedding with Local Sample Distributions for Person Re-identification,2018 +100,China,Market 1501,market_1501,39.9922379,116.30393816,Peking University,edu,2788f382e4396290acfc8b21df45cc811586e66e,citation,https://arxiv.org/pdf/1605.03259.pdf,Deep Attributes Driven Multi-Camera Person Re-identification,2016 +101,China,Market 1501,market_1501,40.0044795,116.370238,Chinese Academy of Sciences,edu,2788f382e4396290acfc8b21df45cc811586e66e,citation,https://arxiv.org/pdf/1605.03259.pdf,Deep Attributes Driven Multi-Camera Person Re-identification,2016 +102,United States,Market 1501,market_1501,29.58333105,-98.61944505,University of Texas at San Antonio,edu,2788f382e4396290acfc8b21df45cc811586e66e,citation,https://arxiv.org/pdf/1605.03259.pdf,Deep Attributes Driven Multi-Camera Person Re-identification,2016 +103,United States,Market 1501,market_1501,40.4441619,-79.94272826,Carnegie Mellon University,edu,63e1ce7de0fdbce6e03d25b5001c670c30139aa8,citation,https://arxiv.org/pdf/1707.07791.pdf,Deep Feature Learning via Structured Graph Laplacian Embedding for Person Re-Identification,2018 +104,China,Market 1501,market_1501,34.250803,108.983693,Xi’an Jiaotong University,edu,63e1ce7de0fdbce6e03d25b5001c670c30139aa8,citation,https://arxiv.org/pdf/1707.07791.pdf,Deep Feature Learning via Structured Graph Laplacian Embedding for Person Re-Identification,2018 +105,United Kingdom,Market 1501,market_1501,51.5247272,-0.03931035,Queen Mary University of London,edu,e3e36ccd836458d51676789fb133b092d42dac16,citation,https://arxiv.org/pdf/1610.05047.pdf,Deep learning prototype domains for person re-identification,2017 +106,Australia,Market 1501,market_1501,-34.9189226,138.60423668,University of Adelaide,edu,63ac85ec1bff6009bb36f0b24ef189438836bc91,citation,https://arxiv.org/pdf/1606.01595.pdf,Deep linear discriminant analysis on fisher networks: A hybrid architecture for person re-identification,2017 +107,China,Market 1501,market_1501,40.0044795,116.370238,Chinese Academy of 
Sciences,edu,9a81f46fcf8c6c0efbe34649552b5056ce419a3d,citation,https://arxiv.org/pdf/1705.03332.pdf,Deep person re-identification with improved embedding and efficient training,2017 +108,China,Market 1501,market_1501,34.250803,108.983693,Xi’an Jiaotong University,edu,6562c40932ea734f46e5068555fbf3a185a345de,citation,https://arxiv.org/pdf/1707.00409.pdf,Deep Ranking Model by Large Adaptive Margin Learning for Person Re-identification,2018 +109,United Kingdom,Market 1501,market_1501,51.5247272,-0.03931035,Queen Mary University of London,edu,35b9af6057801fb2f28881840c8427c9cf648757,citation,https://arxiv.org/pdf/1707.02785.pdf,Deep Reinforcement Learning Attention Selection For Person Re-Identification,2017 +110,China,Market 1501,market_1501,40.0044795,116.370238,Chinese Academy of Sciences,edu,8961677300a9ee30ca51e1a3cf9815b4a162265b,citation,https://arxiv.org/pdf/1707.00798.pdf,Deep Representation Learning with Part Loss for Person Re-Identification,2017 +111,China,Market 1501,market_1501,39.9922379,116.30393816,Peking University,edu,8961677300a9ee30ca51e1a3cf9815b4a162265b,citation,https://arxiv.org/pdf/1707.00798.pdf,Deep Representation Learning with Part Loss for Person Re-Identification,2017 +112,United States,Market 1501,market_1501,29.58333105,-98.61944505,University of Texas at San Antonio,edu,8961677300a9ee30ca51e1a3cf9815b4a162265b,citation,https://arxiv.org/pdf/1707.00798.pdf,Deep Representation Learning with Part Loss for Person Re-Identification,2017 +113,China,Market 1501,market_1501,34.250803,108.983693,Xi’an Jiaotong University,edu,123286df95d93600f4281c60a60c69121c6440c7,citation,https://arxiv.org/pdf/1710.05711.pdf,Deep Self-Paced Learning for Person Re-Identification,2018 +114,China,Market 1501,market_1501,31.20081505,121.42840681,Shanghai Jiao Tong University,edu,d8949f4f4085b15978e20ed7c5c34a080dd637f2,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w17/papers/Chen_Deep_Spatial-Temporal_Fusion_CVPR_2017_paper.pdf,Deep Spatial-Temporal Fusion Network for Video-Based Person Re-identification,2017 +115,China,Market 1501,market_1501,39.9922379,116.30393816,Peking University,edu,31c0968fb5f587918f1c49bf7fa51453b3e89cf7,citation,https://arxiv.org/pdf/1611.05244.pdf,Deep Transfer Learning for Person Re-Identification,2018 +116,China,Market 1501,market_1501,30.19331415,120.11930822,Zhejiang University,edu,50bf4f77d8b66ec838ad59a869630eace7e0e4a7,citation,https://arxiv.org/pdf/1707.07256.pdf,Deeply-Learned Part-Aligned Representations for Person Re-identification,2017 +117,United States,Market 1501,market_1501,47.6423318,-122.1369302,Microsoft,company,50bf4f77d8b66ec838ad59a869630eace7e0e4a7,citation,https://arxiv.org/pdf/1707.07256.pdf,Deeply-Learned Part-Aligned Representations for Person Re-identification,2017 +118,China,Market 1501,market_1501,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,d497543834f23f72f4092252b613bf3adaefc606,citation,https://arxiv.org/pdf/1805.07698.pdf,Density-Adaptive Kernel based Re-Ranking for Person Re-Identification,2018 +119,China,Market 1501,market_1501,23.09461185,113.28788994,Sun Yat-Sen University,edu,19a0f34440c25323544b90d9d822a212bfed0eb5,citation,https://arxiv.org/pdf/1901.10100.pdf,Discovering Underlying Person Structure Pattern with Relative Local Distance for Person Re-identification,2019 +120,China,Market 1501,market_1501,34.250803,108.983693,Xi’an Jiaotong University,edu,7b2e0c87aece7ff1404ef2034d4c5674770301b2,citation,https://arxiv.org/pdf/1807.01455.pdf,Discriminative Feature 
Learning with Foreground Attention for Person Re-Identification,2018 +121,China,Market 1501,market_1501,31.2284923,121.40211389,East China Normal University,edu,0353fe24ecd237f4d9ae4dbc277a6a67a69ce8ed,citation,https://pdfs.semanticscholar.org/0353/fe24ecd237f4d9ae4dbc277a6a67a69ce8ed.pdf,Discriminative Feature Representation for Person Re-identification by Batch-contrastive Loss,2018 +122,United Kingdom,Market 1501,market_1501,55.94951105,-3.19534913,University of Edinburgh,edu,68621721705e3115355268450b4b447362e455c6,citation,https://arxiv.org/pdf/1812.02605.pdf,Disjoint Label Space Transfer Learning with Common Factorised Space,2019 +123,China,Market 1501,market_1501,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,d950af49c44bc5d9f4a5cc1634e606004790b1e5,citation,https://arxiv.org/pdf/1708.04169.pdf,Divide and Fuse: A Re-ranking Approach for Person Re-identification,2017 +124,United Arab Emirates,Market 1501,market_1501,24.453884,54.3773438,New York University Abu Dhabi,edu,a94b832facb57ea37b18927b13d2dd4c5fa3a9ea,citation,https://arxiv.org/pdf/1803.09733.pdf,Domain transfer convolutional attribute embedding,2018 +125,China,Market 1501,market_1501,39.9106327,116.3356321,Chinese Academy of Science,edu,7f8d4494aba2a2b11a88bf7de4b8879b047dd69b,citation,http://openaccess.thecvf.com/content_cvpr_2018/papers/Zhou_Easy_Identification_From_CVPR_2018_paper.pdf,Easy Identification from Better Constraints: Multi-shot Person Re-identification from Reference Constraints,2018 +126,United States,Market 1501,market_1501,42.0551164,-87.67581113,Northwestern University,edu,7f8d4494aba2a2b11a88bf7de4b8879b047dd69b,citation,http://openaccess.thecvf.com/content_cvpr_2018/papers/Zhou_Easy_Identification_From_CVPR_2018_paper.pdf,Easy Identification from Better Constraints: Multi-shot Person Re-identification from Reference Constraints,2018 +127,China,Market 1501,market_1501,40.0044795,116.370238,Chinese Academy of Sciences,edu,ca1db9dc493a045e3fadf8d8209eaa4311bbdc70,citation,https://arxiv.org/pdf/1709.09304.pdf,Effective Image Retrieval via Multilinear Multi-index Fusion,2017 +128,United States,Market 1501,market_1501,29.58333105,-98.61944505,University of Texas at San Antonio,edu,ca1db9dc493a045e3fadf8d8209eaa4311bbdc70,citation,https://arxiv.org/pdf/1709.09304.pdf,Effective Image Retrieval via Multilinear Multi-index Fusion,2017 +129,United States,Market 1501,market_1501,42.0551164,-87.67581113,Northwestern University,edu,00bf7bcf31ee71f5f325ca5307883157ba3d580f,citation,http://openaccess.thecvf.com/content_ICCV_2017/papers/Zhou_Efficient_Online_Local_ICCV_2017_paper.pdf,Efficient Online Local Metric Adaptation via Negative Samples for Person Re-identification,2017 +130,China,Market 1501,market_1501,40.0044795,116.370238,Chinese Academy of Sciences,edu,febff0f6faa8dde77848845e4b3e6f6c91180d33,citation,https://arxiv.org/pdf/1611.00137.pdf,Embedding Deep Metric for Person Re-identification A Study Against Large Variations,2016 +131,China,Market 1501,market_1501,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,febff0f6faa8dde77848845e4b3e6f6c91180d33,citation,https://arxiv.org/pdf/1611.00137.pdf,Embedding Deep Metric for Person Re-identification A Study Against Large Variations,2016 +132,China,Market 1501,market_1501,23.09461185,113.28788994,Sun Yat-Sen University,edu,febff0f6faa8dde77848845e4b3e6f6c91180d33,citation,https://arxiv.org/pdf/1611.00137.pdf,Embedding Deep Metric for Person Re-identification A Study Against Large Variations,2016 +133,China,Market
1501,market_1501,31.846918,117.29053367,Hefei University of Technology,edu,fd0e1fecf7e72318a4c53463fd5650720df40281,citation,https://arxiv.org/pdf/1606.04404.pdf,End-to-End Comparative Attention Networks for Person Re-Identification,2017 +134,China,Market 1501,market_1501,39.9041999,116.4073963,"Qihoo 360 AI Institute, Beijing, China",edu,fd0e1fecf7e72318a4c53463fd5650720df40281,citation,https://arxiv.org/pdf/1606.04404.pdf,End-to-End Comparative Attention Networks for Person Re-Identification,2017 +135,Singapore,Market 1501,market_1501,1.2966426,103.7763939,Singapore / National University of Singapore,edu,fd0e1fecf7e72318a4c53463fd5650720df40281,citation,https://arxiv.org/pdf/1606.04404.pdf,End-to-End Comparative Attention Networks for Person Re-Identification,2017 +136,China,Market 1501,market_1501,31.970907,118.8128989,PLA Army Engineering University,edu,c8ac121e9c4eb9964be9c5713f22a95c1c3b57e9,citation,https://arxiv.org/pdf/1901.05798.pdf,Ensemble Feature for Person Re-Identification,2019 +137,Spain,Market 1501,market_1501,41.5008957,2.111553,Autonomous University of Barcelona,edu,fe54a5a10288648f3bd0a71b053cdb896716b552,citation,https://arxiv.org/pdf/1804.04419.pdf,"Exploiting feature representations through similarity learning, post-ranking and ranking aggregation for person re-identification",2018 +138,Spain,Market 1501,market_1501,41.40657415,2.1945341,Universitat Oberta de Catalunya,edu,fe54a5a10288648f3bd0a71b053cdb896716b552,citation,https://arxiv.org/pdf/1804.04419.pdf,"Exploiting feature representations through similarity learning, post-ranking and ranking aggregation for person re-identification",2018 +139,Spain,Market 1501,market_1501,41.3868913,2.16352385,University of Barcelona,edu,fe54a5a10288648f3bd0a71b053cdb896716b552,citation,https://arxiv.org/pdf/1804.04419.pdf,"Exploiting feature representations through similarity learning, post-ranking and ranking aggregation for person re-identification",2018 +140,United States,Market 1501,market_1501,33.2416008,-111.8839083,Intel,company,6a9c3011b5092daa1d0cacda23f20ca4ae74b902,citation,https://arxiv.org/pdf/1812.02465.pdf,Fast and Accurate Person Re-Identification with RMNet.,2018 +141,China,Market 1501,market_1501,39.9808333,116.34101249,Beihang University,edu,91cc3981c304227e13ae151a43fbb124419bc0ce,citation,http://openaccess.thecvf.com/content_cvpr_2017/papers/Chen_Fast_Person_Re-Identification_CVPR_2017_paper.pdf,Fast Person Re-identification via Cross-Camera Semantic Binary Transformation,2017 +142,United Kingdom,Market 1501,market_1501,52.6221571,1.2409136,University of East Anglia,edu,91cc3981c304227e13ae151a43fbb124419bc0ce,citation,http://openaccess.thecvf.com/content_cvpr_2017/papers/Chen_Fast_Person_Re-Identification_CVPR_2017_paper.pdf,Fast Person Re-identification via Cross-Camera Semantic Binary Transformation,2017 +143,Singapore,Market 1501,market_1501,1.3484104,103.68297965,Nanyang Technological University,edu,6123e52c1a560c88817d8720e05fbff8565271fb,citation,https://arxiv.org/pdf/1607.08378.pdf,Gated Siamese Convolutional Neural Network Architecture for Human Re-Identification,2016 +144,United States,Market 1501,market_1501,38.5336349,-121.79077264,"University of California, Davis",edu,79c959833ff49f860e20b6654dbf4d6acdee0230,citation,https://arxiv.org/pdf/1811.02545.pdf,Hide-and-Seek: A Data Augmentation Technique for Weakly-Supervised Localization and Beyond,2018 +145,China,Market 1501,market_1501,30.19331415,120.11930822,Zhejiang 
University,edu,79c959833ff49f860e20b6654dbf4d6acdee0230,citation,https://arxiv.org/pdf/1811.02545.pdf,Hide-and-Seek: A Data Augmentation Technique for Weakly-Supervised Localization and Beyond,2018 +146,Taiwan,Market 1501,market_1501,25.0410728,121.6147562,Institute of Information Science,edu,3cbb4cf942ee95d14505c0f83a48ba224abdd00b,citation,https://arxiv.org/pdf/1712.06820.pdf,Hierarchical Cross Network for Person Re-identification,2017 +147,Japan,Market 1501,market_1501,33.8941968,130.8394083,Kyushu Institute of Technology,edu,7da961cb039b1a01cad9b78d93bdfe2a69ed3ccf,citation,https://arxiv.org/pdf/1706.04318.pdf,Hierarchical Gaussian Descriptors with Application to Person Re-Identification,2017 +148,Japan,Market 1501,market_1501,33.59914655,130.22359848,Kyushu University,edu,7da961cb039b1a01cad9b78d93bdfe2a69ed3ccf,citation,https://arxiv.org/pdf/1706.04318.pdf,Hierarchical Gaussian Descriptors with Application to Person Re-Identification,2017 +149,Japan,Market 1501,market_1501,35.9020448,139.93622009,University of Tokyo,edu,7da961cb039b1a01cad9b78d93bdfe2a69ed3ccf,citation,https://arxiv.org/pdf/1706.04318.pdf,Hierarchical Gaussian Descriptors with Application to Person Re-Identification,2017 +150,United States,Market 1501,market_1501,42.3504253,-71.10056114,Boston University,edu,7c25ed788da1f5f61d8d1da23dd319dfb4e5ac2d,citation,https://arxiv.org/pdf/1612.01345.pdf,Human-In-The-Loop Person Re-Identification,2016 +151,United Kingdom,Market 1501,market_1501,51.5247272,-0.03931035,Queen Mary University of London,edu,7c25ed788da1f5f61d8d1da23dd319dfb4e5ac2d,citation,https://arxiv.org/pdf/1612.01345.pdf,Human-In-The-Loop Person Re-Identification,2016 +152,United Kingdom,Market 1501,market_1501,55.378051,-3.435973,"Vision Semantics Ltd, UK",edu,7c25ed788da1f5f61d8d1da23dd319dfb4e5ac2d,citation,https://arxiv.org/pdf/1612.01345.pdf,Human-In-The-Loop Person Re-Identification,2016 +153,Australia,Market 1501,market_1501,-37.9062737,145.1319449,"CSIRO, Australia",edu,53492cb14b33a26b10c91102daa2d5a2a3ed069d,citation,https://arxiv.org/pdf/1806.07592.pdf,Improving Online Multiple Object tracking with Deep Metric Learning,2018 +154,Germany,Market 1501,market_1501,50.7791703,6.06728733,RWTH Aachen University,edu,a3d11e98794896849ab2304a42bf83e2979e5fb5,citation,https://arxiv.org/pdf/1703.07737.pdf,In Defense of the Triplet Loss for Person Re-Identification,2017 +155,China,Market 1501,market_1501,34.250803,108.983693,Xi’an Jiaotong University,edu,cb8567f074573a0d66d50e75b5a91df283ccd503,citation,https://arxiv.org/pdf/1708.05512.pdf,Large Margin Learning in Set-to-Set Similarity Comparison for Person Reidentification,2018 +156,United Kingdom,Market 1501,market_1501,51.5247272,-0.03931035,Queen Mary University of London,edu,207e0ac5301a3c79af862951b70632ed650f74f7,citation,https://arxiv.org/pdf/1603.02139.pdf,Learning a Discriminative Null Space for Person Re-identification,2016 +157,China,Market 1501,market_1501,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,34cf90fcbf83025666c5c86ec30ac58b632b27b0,citation,https://arxiv.org/pdf/1710.06555.pdf,Learning Deep Context-Aware Features over Body and Latent Parts for Person Re-identification,2017 +158,United States,Market 1501,market_1501,40.007581,-105.2659417,University of Colorado,edu,ad3be20fe0106d80c567def71fef01146564df4b,citation,https://arxiv.org/pdf/1802.05312.pdf,Learning Deep Disentangled Embeddings With the F-Statistic Loss,2018 +159,Russia,Market 1501,market_1501,55.6846566,37.3407539,"Skolkovo Institute of Science and 
Technology, Skolkovo, Moscow",edu,218603147709344d4ff66625d83603deee2854bf,citation,https://arxiv.org/pdf/1611.00822.pdf,Learning Deep Embeddings with Histogram Loss,2016 +160,China,Market 1501,market_1501,23.09461185,113.28788994,Sun Yat-Sen University,edu,489decd84645b77d31001d17a66abb92bb96c731,citation,https://arxiv.org/pdf/1803.11333.pdf,Learning View-Specific Deep Networks for Person Re-Identification,2018 +161,Norway,Market 1501,market_1501,63.419499,10.4020771,Norwegian University of Science and Technology,edu,2102915d0c51cfda4d85133bd593ecb9508fa4bb,citation,https://arxiv.org/pdf/1701.03153.pdf,Looking Beyond Appearances: Synthetic Training Data for Deep CNNs in Re-identification,2018 +162,Italy,Market 1501,market_1501,41.9037626,12.5144384,Sapienza University of Rome,edu,2102915d0c51cfda4d85133bd593ecb9508fa4bb,citation,https://arxiv.org/pdf/1701.03153.pdf,Looking Beyond Appearances: Synthetic Training Data for Deep CNNs in Re-identification,2018 +163,Italy,Market 1501,market_1501,45.437398,11.003376,University of Verona,edu,2102915d0c51cfda4d85133bd593ecb9508fa4bb,citation,https://arxiv.org/pdf/1701.03153.pdf,Looking Beyond Appearances: Synthetic Training Data for Deep CNNs in Re-identification,2018 +164,China,Market 1501,market_1501,40.00229045,116.32098908,Tsinghua University,edu,c0387e788a52f10bf35d4d50659cfa515d89fbec,citation,https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf,MARS: A Video Benchmark for Large-Scale Person Re-Identification,2016 +165,China,Market 1501,market_1501,40.00229045,116.32098908,Tsinghua University,edu,1e83e2abcb258cd62b160e3f31a490a6bc042e83,citation,https://arxiv.org/pdf/1704.02492.pdf,Metric Learning in Codebook Generation of Bag-of-Words for Person Re-identification,2017 +166,China,Market 1501,market_1501,31.8405068,117.2638057,Hefei University,edu,7c9d8593cdf2f8ba9f27906b2b5827b145631a0b,citation,https://arxiv.org/pdf/1810.08534.pdf,MsCGAN: Multi-scale Conditional Generative Adversarial Networks for Person Image Generation,2018 +167,China,Market 1501,market_1501,23.09461185,113.28788994,Sun Yat-Sen University,edu,1565bf91f8fdfe5f5168a5050b1418debc662151,citation,https://arxiv.org/pdf/1711.03368.pdf,One-pass Person Re-identification by Sketch Online Discriminant Analysis,2017 +168,Australia,Market 1501,market_1501,-33.8809651,151.20107299,University of Technology Sydney,edu,592e555ebe4bd2d821230e7074d7e9626af716b0,citation,https://arxiv.org/pdf/1809.02681.pdf,Open Set Adversarial Examples,2018 +169,China,Market 1501,market_1501,40.0044795,116.370238,Chinese Academy of Sciences,edu,fcaa88dcb1a440ef09c4e5d724ed209bfc5d3367,citation,https://arxiv.org/pdf/1811.09928.pdf,PCGAN: Partition-Controlled Human Image Generation,2019 +170,China,Market 1501,market_1501,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,fcaa88dcb1a440ef09c4e5d724ed209bfc5d3367,citation,https://arxiv.org/pdf/1811.09928.pdf,PCGAN: Partition-Controlled Human Image Generation,2019 +171,China,Market 1501,market_1501,22.4162632,114.2109318,Chinese University of Hong Kong,edu,2fad06ed34169a5b1f736112364c58140577a6b4,citation,https://pdfs.semanticscholar.org/2fad/06ed34169a5b1f736112364c58140577a6b4.pdf,Pedestrian Color Naming via Convolutional Neural Network,2016 +172,China,Market 1501,market_1501,22.4162632,114.2109318,Chinese University of Hong Kong,edu,25bb4212af72d64ec20cac533f58f7af1472e057,citation,https://arxiv.org/pdf/1703.08837.pdf,Person Re-Identification by Camera Correlation Aware Feature Augmentation,2018 +173,China,Market 
1501,market_1501,28.2290209,112.99483204,"National University of Defense Technology, China",mil,25bb4212af72d64ec20cac533f58f7af1472e057,citation,https://arxiv.org/pdf/1703.08837.pdf,Person Re-Identification by Camera Correlation Aware Feature Augmentation,2018 +174,China,Market 1501,market_1501,23.09461185,113.28788994,Sun Yat-Sen University,edu,25bb4212af72d64ec20cac533f58f7af1472e057,citation,https://arxiv.org/pdf/1703.08837.pdf,Person Re-Identification by Camera Correlation Aware Feature Augmentation,2018 +175,United Kingdom,Market 1501,market_1501,51.5247272,-0.03931035,Queen Mary University of London,edu,744cc8c69255cbe9d992315e456b9efb06f42e20,citation,https://arxiv.org/pdf/1705.04724.pdf,Person Re-Identification by Deep Joint Learning of Multi-Loss Classification,2017 diff --git a/site/datasets/verified/mars.csv b/site/datasets/verified/mars.csv new file mode 100644 index 00000000..cb6901a5 --- /dev/null +++ b/site/datasets/verified/mars.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,MARS,mars,0.0,0.0,,,,main,,MARS: A Video Benchmark for Large-Scale Person Re-Identification,2016 diff --git a/site/datasets/verified/megaage.csv b/site/datasets/verified/megaage.csv new file mode 100644 index 00000000..04702674 --- /dev/null +++ b/site/datasets/verified/megaage.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,MegaAge,megaage,0.0,0.0,,,,main,,Quantifying Facial Age by Posterior of Age Comparisons,2017 diff --git a/site/datasets/verified/megaface.csv b/site/datasets/verified/megaface.csv new file mode 100644 index 00000000..d9f78ec3 --- /dev/null +++ b/site/datasets/verified/megaface.csv @@ -0,0 +1,4 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,MegaFace,megaface,0.0,0.0,,,,main,,Level Playing Field for Million Scale Face Recognition,2017 +1,Netherlands,MegaFace,megaface,53.21967825,6.56251482,University of Groningen,edu,8efda5708bbcf658d4f567e3866e3549fe045bbb,citation,https://pdfs.semanticscholar.org/8efd/a5708bbcf658d4f567e3866e3549fe045bbb.pdf,Pre-trained Deep Convolutional Neural Networks for Face Recognition,2018 +2,United States,MegaFace,megaface,41.70456775,-86.23822026,University of Notre Dame,edu,e64c166dc5bb33bc61462a8b5ac92edb24d905a1,citation,https://arxiv.org/pdf/1811.01474.pdf,Fast Face Image Synthesis with Minimal Training.,2018 diff --git a/site/datasets/verified/mifs.csv b/site/datasets/verified/mifs.csv new file mode 100644 index 00000000..4e127e79 --- /dev/null +++ b/site/datasets/verified/mifs.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,MIFS,mifs,0.0,0.0,,,,main,,Spoofing faces using makeup: An investigative study,2017 diff --git a/site/datasets/verified/miw.csv b/site/datasets/verified/miw.csv new file mode 100644 index 00000000..11bc2e33 --- /dev/null +++ b/site/datasets/verified/miw.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,MIW,miw,0.0,0.0,,,,main,,Automatic facial makeup detection with application in face recognition,2013 diff --git a/site/datasets/verified/morph.csv b/site/datasets/verified/morph.csv new file mode 100644 index 00000000..b0a66a5f --- /dev/null +++ b/site/datasets/verified/morph.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,MORPH 
Commercial,morph,0.0,0.0,,,,main,,MORPH: a longitudinal image database of normal adult age-progression,2006 diff --git a/site/datasets/verified/morph_nc.csv b/site/datasets/verified/morph_nc.csv new file mode 100644 index 00000000..a14720dd --- /dev/null +++ b/site/datasets/verified/morph_nc.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,MORPH Non-Commercial,morph_nc,0.0,0.0,,,,main,,MORPH: a longitudinal image database of normal adult age-progression,2006 diff --git a/site/datasets/verified/mot.csv b/site/datasets/verified/mot.csv new file mode 100644 index 00000000..ae532522 --- /dev/null +++ b/site/datasets/verified/mot.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,MOT,mot,0.0,0.0,,,,main,,Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics,2008 diff --git a/site/datasets/verified/msceleb.csv b/site/datasets/verified/msceleb.csv new file mode 100644 index 00000000..d1a7ec8c --- /dev/null +++ b/site/datasets/verified/msceleb.csv @@ -0,0 +1,127 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,MsCeleb,msceleb,0.0,0.0,,,,main,,MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition,2016 +1,China,MsCeleb,msceleb,22.4162632,114.2109318,Chinese University of Hong Kong,edu,2011d4da646f794456bebb617d1500ddf71989ed,citation,https://pdfs.semanticscholar.org/2011/d4da646f794456bebb617d1500ddf71989ed.pdf,Transductive Centroid Projection for Semi-supervised Large-Scale Recognition,2018 +2,China,MsCeleb,msceleb,39.993008,116.329882,SenseTime,company,2011d4da646f794456bebb617d1500ddf71989ed,citation,https://pdfs.semanticscholar.org/2011/d4da646f794456bebb617d1500ddf71989ed.pdf,Transductive Centroid Projection for Semi-supervised Large-Scale Recognition,2018 +3,United States,MsCeleb,msceleb,39.2899685,-76.62196103,University of Maryland,edu,23dd8d17ce09c22d367e4d62c1ccf507bcbc64da,citation,https://pdfs.semanticscholar.org/23dd/8d17ce09c22d367e4d62c1ccf507bcbc64da.pdf,Deep Density Clustering of Unconstrained Faces (Supplementary Material),2018 +4,United States,MsCeleb,msceleb,37.3936717,-122.0807262,Facebook,company,628a3f027b7646f398c68a680add48c7969ab1d9,citation,https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf,Plan for Final Year Project: HKU-Face: A Large Scale Dataset for Deep Face Recognition,2017 +5,United States,MsCeleb,msceleb,37.4219999,-122.0840575,Google,company,628a3f027b7646f398c68a680add48c7969ab1d9,citation,https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf,Plan for Final Year Project: HKU-Face: A Large Scale Dataset for Deep Face Recognition,2017 +6,France,MsCeleb,msceleb,46.1476461,-1.1549415,University of La Rochelle,edu,153fbae25efd061f9046970071d0cfe739a35a0e,citation,,FaceLiveNet: End-to-End Networks Combining Face Verification with Interactive Facial Expression-Based Liveness Detection,2018 +7,China,MsCeleb,msceleb,26.89887,112.590435,University of South China,edu,98518fc368d7e1478cef40f5f8fd4468763645ad,citation,http://downloads.hindawi.com/journals/cin/2018/4512473.pdf,A Community Detection Approach to Cleaning Extremely Large Face Database,2018 +8,China,MsCeleb,msceleb,28.2290209,112.99483204,"National University of Defense Technology, China",mil,98518fc368d7e1478cef40f5f8fd4468763645ad,citation,http://downloads.hindawi.com/journals/cin/2018/4512473.pdf,A Community Detection Approach to Cleaning
Extremely Large Face Database,2018 +9,China,MsCeleb,msceleb,39.9601488,116.35193921,Beijing University of Posts and Telecommunications,edu,6cdbbced12bff53bcbdde3cdb6d20b4bd02a9d6c,citation,https://arxiv.org/pdf/1811.12026.pdf,Attacks on State-of-the-Art Face Recognition using Attentional Adversarial Attack Generative Network,2018 +10,China,MsCeleb,msceleb,39.98177,116.330086,National Laboratory of Pattern Recognition,edu,e47f4a127f41c055fb7893ddc295932ead783c63,citation,https://arxiv.org/pdf/1709.03675.pdf,Adversarial Discriminative Heterogeneous Face Recognition,2018 +11,China,MsCeleb,msceleb,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,e47f4a127f41c055fb7893ddc295932ead783c63,citation,https://arxiv.org/pdf/1709.03675.pdf,Adversarial Discriminative Heterogeneous Face Recognition,2018 +12,China,MsCeleb,msceleb,22.4162632,114.2109318,Chinese University of Hong Kong,edu,2401cd5606c6bc5390acc352d00c1685f0c8af60,citation,https://arxiv.org/pdf/1809.01407.pdf,Consensus-Driven Propagation in Massive Unlabeled Data for Face Recognition,2018 +13,China,MsCeleb,msceleb,39.993008,116.329882,SenseTime,company,2401cd5606c6bc5390acc352d00c1685f0c8af60,citation,https://arxiv.org/pdf/1809.01407.pdf,Consensus-Driven Propagation in Massive Unlabeled Data for Face Recognition,2018 +14,Singapore,MsCeleb,msceleb,1.3484104,103.68297965,Nanyang Technological University,edu,2401cd5606c6bc5390acc352d00c1685f0c8af60,citation,https://arxiv.org/pdf/1809.01407.pdf,Consensus-Driven Propagation in Massive Unlabeled Data for Face Recognition,2018 +15,United States,MsCeleb,msceleb,42.718568,-84.47791571,Michigan State University,edu,b446bcd7fb78adfe346cf7a01a38e4f43760f363,citation,https://pdfs.semanticscholar.org/b446/bcd7fb78adfe346cf7a01a38e4f43760f363.pdf,Longitudinal Study of Child Face Recognition,2017 +16,United Kingdom,MsCeleb,msceleb,51.3791442,-2.3252332,University of Bath,edu,26567da544239cc6628c5696b0b10539144cbd57,citation,https://arxiv.org/pdf/1811.12784.pdf,The GAN that Warped: Semantic Attribute Editing with Unpaired Data,2018 +17,United Kingdom,MsCeleb,msceleb,51.49887085,-0.17560797,Imperial College London,edu,40bb090a4e303f11168dce33ed992f51afe02ff7,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Deng_Marginal_Loss_for_CVPR_2017_paper.pdf,Marginal Loss for Deep Face Recognition,2017 +18,China,MsCeleb,msceleb,40.0044795,116.370238,Chinese Academy of Sciences,edu,4cdb6144d56098b819076a8572a664a2c2d27f72,citation,https://arxiv.org/pdf/1806.01196.pdf,Face Synthesis for Eyeglass-Robust Face Recognition,2018 +19,China,MsCeleb,msceleb,39.9082804,116.2458527,University of Chinese Academy of Sciences,edu,4cdb6144d56098b819076a8572a664a2c2d27f72,citation,https://arxiv.org/pdf/1806.01196.pdf,Face Synthesis for Eyeglass-Robust Face Recognition,2018 +20,United States,MsCeleb,msceleb,39.2899685,-76.62196103,University of Maryland,edu,872dfdeccf99bbbed7c8f1ea08afb2d713ebe085,citation,https://arxiv.org/pdf/1703.09507.pdf,L2-constrained Softmax Loss for Discriminative Face Verification,2017 +21,United States,MsCeleb,msceleb,42.718568,-84.47791571,Michigan State University,edu,3011b5fce49112228711a9e5f92d6f191687c1ea,citation,https://arxiv.org/pdf/1803.09014.pdf,Feature Transfer Learning for Deep Face Recognition with Long-Tail Data,2018 +22,United Kingdom,MsCeleb,msceleb,51.49887085,-0.17560797,Imperial College London,edu,1929863fff917ee7f6dc428fc1ce732777668eca,citation,https://arxiv.org/pdf/1712.04695.pdf,UV-GAN: Adversarial Facial UV Map
Completion for Pose-Invariant Face Recognition,2018 +23,China,MsCeleb,msceleb,22.4162632,114.2109318,Chinese University of Hong Kong,edu,d949fadc9b6c5c8b067fa42265ad30945f9caa99,citation,https://arxiv.org/pdf/1710.00870.pdf,Rethinking Feature Discrimination and Polymerization for Large-scale Recognition,2017 +24,China,MsCeleb,msceleb,31.30104395,121.50045497,Fudan University,edu,5a259f2f5337435f841d39dada832ab24e7b3325,citation,,Face Recognition via Active Annotation and Learning,2016 +25,China,MsCeleb,msceleb,40.0044795,116.370238,Chinese Academy of Sciences,edu,5a259f2f5337435f841d39dada832ab24e7b3325,citation,,Face Recognition via Active Annotation and Learning,2016 +26,China,MsCeleb,msceleb,39.993008,116.329882,SenseTime,company,c72a2ea819df9b0e8cd267eebcc6528b8741e03d,citation,https://arxiv.org/pdf/1708.09687.pdf,Quantifying Facial Age by Posterior of Age Comparisons,2017 +27,China,MsCeleb,msceleb,22.4162632,114.2109318,Chinese University of Hong Kong,edu,c72a2ea819df9b0e8cd267eebcc6528b8741e03d,citation,https://arxiv.org/pdf/1708.09687.pdf,Quantifying Facial Age by Posterior of Age Comparisons,2017 +28,United States,MsCeleb,msceleb,39.2899685,-76.62196103,University of Maryland,edu,b6f758be954d34817d4ebaa22b30c63a4b8ddb35,citation,https://arxiv.org/pdf/1703.04835.pdf,A Proximity-Aware Hierarchical Clustering of Faces,2017 +29,United States,MsCeleb,msceleb,42.718568,-84.47791571,Michigan State University,edu,19fa871626df604639550c6445d2f76cd369dd13,citation,https://arxiv.org/pdf/1805.02283.pdf,DocFace: Matching ID Document Photos to Selfies,2018 +30,United States,MsCeleb,msceleb,32.87935255,-117.23110049,"University of California, San Diego",edu,d35534f3f59631951011539da2fe83f2844ca245,citation,https://arxiv.org/pdf/1705.07904.pdf,Semantically Decomposing the Latent Spaces of Generative Adversarial Networks,2017 +31,United States,MsCeleb,msceleb,37.43131385,-122.16936535,Stanford University,edu,d35534f3f59631951011539da2fe83f2844ca245,citation,https://arxiv.org/pdf/1705.07904.pdf,Semantically Decomposing the Latent Spaces of Generative Adversarial Networks,2017 +32,United States,MsCeleb,msceleb,40.4441619,-79.94272826,Carnegie Mellon University,edu,d35534f3f59631951011539da2fe83f2844ca245,citation,https://arxiv.org/pdf/1705.07904.pdf,Semantically Decomposing the Latent Spaces of Generative Adversarial Networks,2017 +33,Canada,MsCeleb,msceleb,49.2767454,-122.91777375,Simon Fraser University,edu,b301fd2fc33f24d6f75224e7c0991f4f04b64a65,citation,https://arxiv.org/pdf/1803.06340.pdf,Faces as Lighting Probes via Unsupervised Deep Highlight Extraction,2018 +34,China,MsCeleb,msceleb,28.2290209,112.99483204,"National University of Defense Technology, China",mil,b301fd2fc33f24d6f75224e7c0991f4f04b64a65,citation,https://arxiv.org/pdf/1803.06340.pdf,Faces as Lighting Probes via Unsupervised Deep Highlight Extraction,2018 +35,United States,MsCeleb,msceleb,42.3614256,-71.0812092,Microsoft Research Asia,company,b301fd2fc33f24d6f75224e7c0991f4f04b64a65,citation,https://arxiv.org/pdf/1803.06340.pdf,Faces as Lighting Probes via Unsupervised Deep Highlight Extraction,2018 +36,United Kingdom,MsCeleb,msceleb,51.7534538,-1.25400997,University of Oxford,edu,70c59dc3470ae867016f6ab0e008ac8ba03774a1,citation,https://arxiv.org/pdf/1710.08092.pdf,VGGFace2: A Dataset for Recognising Faces across Pose and Age,2018 +37,China,MsCeleb,msceleb,39.9041999,116.4073963,"Beijing, China",edu,7fa4e972da46735971aad52413d17c4014c49e6e,citation,https://arxiv.org/pdf/1709.02940.pdf,How to Train Triplet Networks with 100K 
Identities?,2017 +38,China,MsCeleb,msceleb,39.94976005,116.33629046,Beijing Jiaotong University,edu,d7cbedbee06293e78661335c7dd9059c70143a28,citation,https://arxiv.org/pdf/1804.07573.pdf,MobileFaceNets: Efficient CNNs for Accurate Real-time Face Verification on Mobile Devices,2018 +39,Singapore,MsCeleb,msceleb,1.2962018,103.77689944,National University of Singapore,edu,fca9ebaa30d69ccec8bb577c31693c936c869e72,citation,https://arxiv.org/pdf/1809.00338.pdf,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,2018 +40,China,MsCeleb,msceleb,40.0044795,116.370238,Chinese Academy of Sciences,edu,fca9ebaa30d69ccec8bb577c31693c936c869e72,citation,https://arxiv.org/pdf/1809.00338.pdf,Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition,2018 +41,Japan,MsCeleb,msceleb,35.6992503,139.7721568,"Hitachi, Ltd., Tokyo, Japan",company,3b4da93fbdf7ae520fa00d39ffa694e850b85162,citation,,Face-Voice Matching using Cross-modal Embeddings,2018 +42,China,MsCeleb,msceleb,30.19331415,120.11930822,Zhejiang University,edu,85860d38c66a5cf2e6ffd6475a3a2ba096ea2920,citation,,Celeb-500K: A Large Training Dataset for Face Recognition,2018 +43,China,MsCeleb,msceleb,22.4162632,114.2109318,Chinese University of Hong Kong,edu,6fed504da4e192fe4c2d452754d23d3db4a4e5e3,citation,https://arxiv.org/pdf/1702.06890.pdf,Learning Deep Features via Congenerous Cosine Loss for Person Recognition,2017 +44,China,MsCeleb,msceleb,40.0044795,116.370238,Chinese Academy of Sciences,edu,6f5309d8cc76d3d300b72745887addd2a2480ba8,citation,,KinNet: Fine-to-Coarse Deep Metric Learning for Kinship Verification,2017 +45,China,MsCeleb,msceleb,40.00229045,116.32098908,Tsinghua University,edu,09ad80c4e80e1e02afb8fa4cb6dab260fb66df53,citation,,Feature Learning for One-Shot Face Recognition,2018 +46,United States,MsCeleb,msceleb,40.4441619,-79.94272826,Carnegie Mellon University,edu,c71217b2b111a51a31cf1107c71d250348d1ff68,citation,https://arxiv.org/pdf/1703.09912.pdf,One Network to Solve Them All — Solving Linear Inverse Problems Using Deep Projection Models,2017 +47,United Kingdom,MsCeleb,msceleb,51.7534538,-1.25400997,University of Oxford,edu,05ee231749c9ce97f036c71c1d2d599d660a8c81,citation,https://arxiv.org/pdf/1810.09951.pdf,GhostVLAD for set-based face recognition,2018 +48,United States,MsCeleb,msceleb,45.57022705,-122.63709346,Concordia University,edu,db374308655256da1479c272582d7c7139c97173,citation,https://arxiv.org/pdf/1811.11080.pdf,MobiFace: A Lightweight Deep Learning Face Recognition on Mobile Devices,2018 +49,United States,MsCeleb,msceleb,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,db374308655256da1479c272582d7c7139c97173,citation,https://arxiv.org/pdf/1811.11080.pdf,MobiFace: A Lightweight Deep Learning Face Recognition on Mobile Devices,2018 +50,United States,MsCeleb,msceleb,36.0678324,-94.1736551,University of Arkansas,edu,db374308655256da1479c272582d7c7139c97173,citation,https://arxiv.org/pdf/1811.11080.pdf,MobiFace: A Lightweight Deep Learning Face Recognition on Mobile Devices,2018 +51,China,MsCeleb,msceleb,22.4162632,114.2109318,Chinese University of Hong Kong,edu,de7d36173f9ca0e89e7a1991d541aed7c65127ea,citation,https://arxiv.org/pdf/1812.01288.pdf,FaceFeat-GAN: a Two-Stage Approach for Identity-Preserving Face Synthesis,2018 +52,China,MsCeleb,msceleb,22.59805605,113.98533784,Shenzhen Institutes of Advanced 
Technology,edu,de7d36173f9ca0e89e7a1991d541aed7c65127ea,citation,https://arxiv.org/pdf/1812.01288.pdf,FaceFeat-GAN: a Two-Stage Approach for Identity-Preserving Face Synthesis,2018 +53,China,MsCeleb,msceleb,40.0044795,116.370238,Chinese Academy of Sciences,edu,212608e00fc1e8912ff845ee7a4a67f88ba938fc,citation,https://arxiv.org/pdf/1704.02450.pdf,Coupled Deep Learning for Heterogeneous Face Recognition,2018 +54,China,MsCeleb,msceleb,22.4162632,114.2109318,Chinese University of Hong Kong,edu,1fd5d08394a3278ef0a89639e9bfec7cb482e0bf,citation,https://arxiv.org/pdf/1804.03487.pdf,Exploring Disentangled Feature Representation Beyond Face Identification,2018 +55,China,MsCeleb,msceleb,39.993008,116.329882,SenseTime,company,1fd5d08394a3278ef0a89639e9bfec7cb482e0bf,citation,https://arxiv.org/pdf/1804.03487.pdf,Exploring Disentangled Feature Representation Beyond Face Identification,2018 +56,United States,MsCeleb,msceleb,40.8722825,-73.89489171,City University of New York,edu,f74917fc0e55f4f5682909dcf6929abd19d33e2e,citation,https://pdfs.semanticscholar.org/f749/17fc0e55f4f5682909dcf6929abd19d33e2e.pdf,GAN QUALITY INDEX (GQI) BY GAN-INDUCED CLASSIFIER,2018 +57,United States,MsCeleb,msceleb,42.3383668,-71.08793524,Northeastern University,edu,f74917fc0e55f4f5682909dcf6929abd19d33e2e,citation,https://pdfs.semanticscholar.org/f749/17fc0e55f4f5682909dcf6929abd19d33e2e.pdf,GAN QUALITY INDEX (GQI) BY GAN-INDUCED CLASSIFIER,2018 +58,United States,MsCeleb,msceleb,47.6423318,-122.1369302,Microsoft,company,f74917fc0e55f4f5682909dcf6929abd19d33e2e,citation,https://pdfs.semanticscholar.org/f749/17fc0e55f4f5682909dcf6929abd19d33e2e.pdf,GAN QUALITY INDEX (GQI) BY GAN-INDUCED CLASSIFIER,2018 +59,China,MsCeleb,msceleb,32.0565957,118.77408833,Nanjing University,edu,8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8,citation,https://pdfs.semanticscholar.org/0056/92b9fa6728df3a7f14578c43410867bba425.pdf,Age Estimation Using Expectation of Label Distribution Learning,2018 +60,China,MsCeleb,msceleb,32.0575279,118.78682252,Southeast University,edu,8ff8c64288a2f7e4e8bf8fda865820b04ab3dbe8,citation,https://pdfs.semanticscholar.org/0056/92b9fa6728df3a7f14578c43410867bba425.pdf,Age Estimation Using Expectation of Label Distribution Learning,2018 +61,United States,MsCeleb,msceleb,42.4505507,-76.4783513,Cornell University,edu,dec0c26855da90876c405e9fd42830c3051c2f5f,citation,https://pdfs.semanticscholar.org/dec0/c26855da90876c405e9fd42830c3051c2f5f.pdf,Supplementary Material: Learning Compositional Visual Concepts with Mutual Consistency,2018 +62,France,MsCeleb,msceleb,48.8476037,2.2639934,"Université Paris-Saclay, France",edu,96e318f8ff91ba0b10348d4de4cb7c2142eb8ba9,citation,,State-of-the-art face recognition performance using publicly available software and datasets,2018 +63,United States,MsCeleb,msceleb,29.7207902,-95.34406271,University of Houston,edu,38d8ff137ff753f04689e6b76119a44588e143f3,citation,https://arxiv.org/pdf/1709.06532.pdf,When 3D-Aided 2D Face Recognition Meets Deep Learning: An extended UR2D for Pose-Invariant Face Recognition,2017 +64,United States,MsCeleb,msceleb,38.0333742,-84.5017758,University of Kentucky,edu,455a7e03a0c5ab618d0e86a06c9910ac179f0479,citation,https://arxiv.org/pdf/1807.08772.pdf,Identity Preserving Face Completion for Large Ocular Region Occlusion,2018 +65,United States,MsCeleb,msceleb,34.0224149,-118.28634407,University of Southern California,edu,455a7e03a0c5ab618d0e86a06c9910ac179f0479,citation,https://arxiv.org/pdf/1807.08772.pdf,Identity Preserving Face Completion for Large
Ocular Region Occlusion,2018 +66,China,MsCeleb,msceleb,45.7413921,126.62552755,Harbin Institute of Technology,edu,455a7e03a0c5ab618d0e86a06c9910ac179f0479,citation,https://arxiv.org/pdf/1807.08772.pdf,Identity Preserving Face Completion for Large Ocular Region Occlusion,2018 +67,China,MsCeleb,msceleb,30.289532,120.009886,Hangzhou Normal University,edu,455a7e03a0c5ab618d0e86a06c9910ac179f0479,citation,https://arxiv.org/pdf/1807.08772.pdf,Identity Preserving Face Completion for Large Ocular Region Occlusion,2018 +68,United Kingdom,MsCeleb,msceleb,51.49887085,-0.17560797,Imperial College London,edu,51992fa881541ca3a4520c1ff9100b83e2f1ad87,citation,https://arxiv.org/pdf/1801.07698.pdf,ArcFace: Additive Angular Margin Loss for Deep Face Recognition,2018 +69,United States,MsCeleb,msceleb,30.40550035,-91.18620474,Louisiana State University,edu,5b9c6ca84268cb283941ae28b73989c0cf7e2ac2,citation,,A Pipeline to Improve Face Recognition Datasets and Applications,2018 +70,Italy,MsCeleb,msceleb,45.814548,8.827665,University of Insubria,edu,5b9c6ca84268cb283941ae28b73989c0cf7e2ac2,citation,,A Pipeline to Improve Face Recognition Datasets and Applications,2018 +71,United States,MsCeleb,msceleb,42.3383668,-71.08793524,Northeastern University,edu,c9efcd8e32dced6efa2bba64789df8d0a8e4996a,citation,,Deep Convolutional Neural Network with Independent Softmax for Large Scale Face Recognition,2016 +72,United Kingdom,MsCeleb,msceleb,51.49887085,-0.17560797,Imperial College London,edu,9b0489f2d5739213ef8c3e2e18739c4353c3a3b7,citation,https://arxiv.org/pdf/1801.06665.pdf,Visual Data Augmentation through Learning,2018 +73,United Kingdom,MsCeleb,msceleb,51.59029705,-0.22963221,Middlesex University,edu,9b0489f2d5739213ef8c3e2e18739c4353c3a3b7,citation,https://arxiv.org/pdf/1801.06665.pdf,Visual Data Augmentation through Learning,2018 +74,United States,MsCeleb,msceleb,42.718568,-84.47791571,Michigan State University,edu,ad2cb5c255e555d9767d526721a4c7053fa2ac58,citation,https://arxiv.org/pdf/1711.03990.pdf,Longitudinal Study of Child Face Recognition,2018 +75,China,MsCeleb,msceleb,22.4162632,114.2109318,Chinese University of Hong Kong,edu,9e182e0cd9d70f876f1be7652c69373bcdf37fb4,citation,https://arxiv.org/pdf/1807.07860.pdf,Talking Face Generation by Adversarially Disentangled Audio-Visual Representation,2018 +76,United States,MsCeleb,msceleb,38.99203005,-76.9461029,University of Maryland College Park,edu,06bd34951305d9f36eb29cf4532b25272da0e677,citation,https://arxiv.org/pdf/1809.07586.pdf,"A Fast and Accurate System for Face Detection, Identification, and Verification",2018 +77,China,MsCeleb,msceleb,40.0044795,116.370238,Chinese Academy of Sciences,edu,94f74c6314ffd02db581e8e887b5fd81ce288dbf,citation,https://arxiv.org/pdf/1511.02683.pdf,A Light CNN for Deep Face Representation With Noisy Labels,2018 +78,Spain,MsCeleb,msceleb,40.4167754,-3.7037902,"Computer Vision Group (www.vision4uav.com), Centro de Automática y Robótica (CAR) UPM-CSIC, Universidad Politécnica de Madrid, José Gutiérrez Abascal 2, 28006, Spain",edu,726f76f11e904d7fcb12736c276a0b00eb5cde49,citation,https://arxiv.org/pdf/1901.05903.pdf,A Performance Comparison of Loss Functions for Deep Face Recognition,2019 +79,India,MsCeleb,msceleb,13.5568171,80.0261283,"Indian Institute of Information Technology, Sri City, India",edu,726f76f11e904d7fcb12736c276a0b00eb5cde49,citation,https://arxiv.org/pdf/1901.05903.pdf,A Performance Comparison of Loss Functions for Deep Face Recognition,2019 +80,United States,MsCeleb,msceleb,38.99203005,-76.9461029,University of 
Maryland College Park,edu,83447d47bb2837b831b982ebf9e63616742bfdec,citation,https://arxiv.org/pdf/1812.04058.pdf,An Automatic System for Unconstrained Video-Based Face Recognition,2018 +81,United States,MsCeleb,msceleb,38.99203005,-76.9461029,University of Maryland College Park,edu,7323b594d3a8508f809e276aa2d224c4e7ec5a80,citation,https://arxiv.org/pdf/1808.05508.pdf,An Experimental Evaluation of Covariates Effects on Unconstrained Face Verification,2018 +82,Italy,MsCeleb,msceleb,43.7192587,10.4207947,"CNR ISTI-Institute of Information Science and Technologies, Pisa, Italy",edu,266766818dbc5a4ca1161ae2bc14c9e269ddc490,citation,https://pdfs.semanticscholar.org/2667/66818dbc5a4ca1161ae2bc14c9e269ddc490.pdf,Boosting a Low-Cost Smart Home Environment with Usage and Access Control Rules,2018 +83,United States,MsCeleb,msceleb,38.99203005,-76.9461029,University of Maryland College Park,edu,944ea33211d67663e04d0181843db634e42cb2ca,citation,https://arxiv.org/pdf/1804.01159.pdf,Crystal Loss and Quality Pooling for Unconstrained Face Verification and Recognition.,2018 +84,Taiwan,MsCeleb,msceleb,25.01682835,121.53846924,National Taiwan University,edu,f15b7c317f106816bf444ac4ffb6c280cd6392c7,citation,http://openaccess.thecvf.com/content_cvpr_2018_workshops/papers/w1/Zhang_Deep_Disguised_Faces_CVPR_2018_paper.pdf,Deep Disguised Faces Recognition,2018 +85,United States,MsCeleb,msceleb,38.99203005,-76.9461029,University of Maryland College Park,edu,a50fa5048c61209149de0711b5f1b1806b43da00,citation,http://openaccess.thecvf.com/content_cvpr_2018_workshops/papers/w1/Bansal_Deep_Features_for_CVPR_2018_paper.pdf,Deep Features for Recognizing Disguised Faces in the Wild,2018 +86,China,MsCeleb,msceleb,40.00229045,116.32098908,Tsinghua University,edu,19d53bb35baf6ab02368756412800c218a2df71c,citation,https://arxiv.org/pdf/1711.09515.pdf,DeepDeblur: Fast one-step blurry face images restoration.,2017 +87,United States,MsCeleb,msceleb,42.718568,-84.47791571,Michigan State University,edu,12ba7c6f559a69fbfaacf61bfb2f8431505b09a0,citation,https://arxiv.org/pdf/1809.05620.pdf,DocFace+: ID Document to Selfie Matching,2018 +88,South Korea,MsCeleb,msceleb,37.5600406,126.9369248,Yonsei University,edu,d8526863f35b29cbf8ac2ae756eaae0d2930ffb1,citation,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w27/Choe_Face_Generation_for_ICCV_2017_paper.pdf,Face Generation for Low-Shot Learning Using Generative Adversarial Networks,2017 +89,China,MsCeleb,msceleb,38.880381,121.529021,Dalian University of Technology,edu,59fc69b3bc4759eef1347161e1248e886702f8f7,citation,https://pdfs.semanticscholar.org/59fc/69b3bc4759eef1347161e1248e886702f8f7.pdf,Final Report of Final Year Project HKU-Face: A Large Scale Dataset for Deep Face Recognition,2018 +90,Germany,MsCeleb,msceleb,52.381515,9.720171,"Leibniz Information Centre for Science and Technology, Hannover, Germany",edu,5209758096819efee15751c8875121bd27f2ee78,citation,https://arxiv.org/pdf/1806.08246.pdf,Finding Person Relations in Image Data of the Internet Archive,2018 +91,Germany,MsCeleb,msceleb,52.381515,9.720171,Leibniz Universität Hannover,edu,5209758096819efee15751c8875121bd27f2ee78,citation,https://arxiv.org/pdf/1806.08246.pdf,Finding Person Relations in Image Data of the Internet Archive,2018 +92,China,MsCeleb,msceleb,35.86166,104.195397,"Megvii Inc.
(Face++), China",company,4874daed0f6a42d03011ed86e5ab46f231b02c13,citation,https://arxiv.org/pdf/1808.06210.pdf,GridFace: Face Rectification via Learning Local Homography Transformations,2018 +93,China,MsCeleb,msceleb,40.0044795,116.370238,Chinese Academy of Sciences,edu,a89cbc90bbb4477a48aec185f2a112ea7ebe9b4d,citation,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w27/Xu_High_Performance_Large_ICCV_2017_paper.pdf,High Performance Large Scale Face Recognition with Multi-cognition Softmax and Feature Retrieval,2017 +94,Singapore,MsCeleb,msceleb,1.2962018,103.77689944,National University of Singapore,edu,a89cbc90bbb4477a48aec185f2a112ea7ebe9b4d,citation,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w27/Xu_High_Performance_Large_ICCV_2017_paper.pdf,High Performance Large Scale Face Recognition with Multi-cognition Softmax and Feature Retrieval,2017 +95,Singapore,MsCeleb,msceleb,1.3392609,103.8916077,Panasonic Singapore,company,a89cbc90bbb4477a48aec185f2a112ea7ebe9b4d,citation,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w27/Xu_High_Performance_Large_ICCV_2017_paper.pdf,High Performance Large Scale Face Recognition with Multi-cognition Softmax and Feature Retrieval,2017 +96,United States,MsCeleb,msceleb,40.8722825,-73.89489171,City University of New York,edu,32aeb90992f6cf8494b1b5c67f4b912feef05e9c,citation,https://arxiv.org/pdf/1802.00853.pdf,Incremental Classifier Learning with Generative Adversarial Networks,2018 +97,United States,MsCeleb,msceleb,47.6423318,-122.1369302,Microsoft,company,32aeb90992f6cf8494b1b5c67f4b912feef05e9c,citation,https://arxiv.org/pdf/1802.00853.pdf,Incremental Classifier Learning with Generative Adversarial Networks,2018 +98,United States,MsCeleb,msceleb,42.3383668,-71.08793524,Northeastern University,edu,32aeb90992f6cf8494b1b5c67f4b912feef05e9c,citation,https://arxiv.org/pdf/1802.00853.pdf,Incremental Classifier Learning with Generative Adversarial Networks,2018 +99,Singapore,MsCeleb,msceleb,1.2962018,103.77689944,National University of Singapore,edu,c808c784237f167c78a87cc5a9d48152579c27a4,citation,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w27/Cheng_Know_You_at_ICCV_2017_paper.pdf,Know You at One Glance: A Compact Vector Representation for Low-Shot Learning,2017 +100,Singapore,MsCeleb,msceleb,1.3392609,103.8916077,Panasonic Singapore,company,c808c784237f167c78a87cc5a9d48152579c27a4,citation,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w27/Cheng_Know_You_at_ICCV_2017_paper.pdf,Know You at One Glance: A Compact Vector Representation for Low-Shot Learning,2017 +101,United States,MsCeleb,msceleb,42.3383668,-71.08793524,Northeastern University,edu,332548fd2e52b27e062bd6dcc1db0953ced6ed48,citation,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w27/Wu_Low-Shot_Face_Recognition_ICCV_2017_paper.pdf,Low-Shot Face Recognition with Hybrid Classifiers,2017 +102,United States,MsCeleb,msceleb,40.4441619,-79.94272826,Carnegie Mellon University,edu,98b2f21db344b8b9f7747feaf86f92558595990c,citation,https://pdfs.semanticscholar.org/98b2/f21db344b8b9f7747feaf86f92558595990c.pdf,PACES OF G ENERATIVE A DVERSARIAL N ETWORKS,2018 +103,United States,MsCeleb,msceleb,37.43131385,-122.16936535,Stanford University,edu,98b2f21db344b8b9f7747feaf86f92558595990c,citation,https://pdfs.semanticscholar.org/98b2/f21db344b8b9f7747feaf86f92558595990c.pdf,PACES OF G ENERATIVE A DVERSARIAL N ETWORKS,2018 +104,United States,MsCeleb,msceleb,32.87935255,-117.23110049,"University of California, 
San Diego",edu,98b2f21db344b8b9f7747feaf86f92558595990c,citation,https://pdfs.semanticscholar.org/98b2/f21db344b8b9f7747feaf86f92558595990c.pdf,PACES OF G ENERATIVE A DVERSARIAL N ETWORKS,2018 +105,China,MsCeleb,msceleb,22.5283157,113.94481,Shenzhen Institute of Wuhan University,edu,e13360cda1ebd6fa5c3f3386c0862f292e4dbee4,citation,https://arxiv.org/pdf/1611.08976.pdf,Range Loss for Deep Face Recognition with Long-Tailed Training Data,2016 +106,Australia,MsCeleb,msceleb,-33.8832376,151.2004942,Southern University of Science and Technology,edu,e13360cda1ebd6fa5c3f3386c0862f292e4dbee4,citation,https://arxiv.org/pdf/1611.08976.pdf,Range Loss for Deep Face Recognition with Long-Tailed Training Data,2016 +107,China,MsCeleb,msceleb,36.20304395,117.05842113,Tianjin University,edu,e13360cda1ebd6fa5c3f3386c0862f292e4dbee4,citation,https://arxiv.org/pdf/1611.08976.pdf,Range Loss for Deep Face Recognition with Long-Tailed Training Data,2016 +108,United Kingdom,MsCeleb,msceleb,51.49887085,-0.17560797,Imperial College London,edu,b26d5d929cc3c0d14da058961ddd024f4c9690f5,citation,https://arxiv.org/pdf/1805.08657.pdf,Robust Conditional Generative Adversarial Networks,2018 +109,France,MsCeleb,msceleb,46.1464423,-1.1570872,La Rochelle University,edu,5c54e0f46330787c4fac48aecced9a8f8e37658a,citation,http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w23/Ming_Simple_Triplet_Loss_ICCV_2017_paper.pdf,Simple Triplet Loss Based on Intra/Inter-Class Metric Learning for Face Verification,2017 +110,China,MsCeleb,msceleb,39.9922379,116.30393816,Peking University,edu,4f0b641860d90dfa4c185670bf636149a2b2b717,citation,,Improve Cross-Domain Face Recognition with IBN-block,2018 +111,China,MsCeleb,msceleb,31.83907195,117.26420748,University of Science and Technology of China,edu,c5b324f7f9abdffc1be83f640674beda81b74315,citation,,Towards Open-Set Identity Preserving Face Synthesis,2018 +112,Italy,MsCeleb,msceleb,44.6451046,10.9279268,University of Modena and Reggio Emilia,edu,ff44d8938c52cfdca48c80f8e1618bbcbf91cb2a,citation,https://pdfs.semanticscholar.org/ff44/d8938c52cfdca48c80f8e1618bbcbf91cb2a.pdf,Towards Video Captioning with Naming: A Novel Dataset and a Multi-modal Approach,2017 +113,France,MsCeleb,msceleb,45.7833631,4.76877036,Ecole Centrale de Lyon,edu,727d03100d4a8e12620acd7b1d1972bbee54f0e6,citation,https://arxiv.org/pdf/1706.04264.pdf,von Mises-Fisher Mixture Model-based Deep learning: Application to Face Verification,2017 +114,France,MsCeleb,msceleb,48.832493,2.267474,Safran Identity and Security,company,727d03100d4a8e12620acd7b1d1972bbee54f0e6,citation,https://arxiv.org/pdf/1706.04264.pdf,von Mises-Fisher Mixture Model-based Deep learning: Application to Face Verification,2017 +115,China,MsCeleb,msceleb,39.980196,116.333305,"CASIA, Center for Research on Intelligent Perception and Computing, Beijing, 100190, China",edu,3ac09c2589178dac0b6a2ea2edf04b7629672d81,citation,https://arxiv.org/pdf/1708.02412.pdf,Wasserstein CNN: Learning Invariant Features for NIR-VIS Face Recognition,2018 +116,China,MsCeleb,msceleb,39.979203,116.33287,"CASIA, National Laboratory of Pattern Recognition",edu,3ac09c2589178dac0b6a2ea2edf04b7629672d81,citation,https://arxiv.org/pdf/1708.02412.pdf,Wasserstein CNN: Learning Invariant Features for NIR-VIS Face Recognition,2018 +117,China,MsCeleb,msceleb,40.0044795,116.370238,Chinese Academy of Sciences,edu,3ac09c2589178dac0b6a2ea2edf04b7629672d81,citation,https://arxiv.org/pdf/1708.02412.pdf,Wasserstein CNN: Learning Invariant Features for NIR-VIS Face Recognition,2018 
+118,United States,MsCeleb,msceleb,38.99203005,-76.9461029,University of Maryland College Park,edu,b35ff9985aaee9371588330bcef0dfc88d1401d7,citation,,Deep Density Clustering of Unconstrained Faces,2018 +119,United States,MsCeleb,msceleb,30.6108365,-96.352128,Texas A&M University,edu,e36fdb50844132fc7925550398e68e7ae95467de,citation,,Face Verification with Disguise Variations via Deep Disguise Recognizer,2018 +120,United States,MsCeleb,msceleb,39.65404635,-79.96475355,West Virginia University,edu,e36fdb50844132fc7925550398e68e7ae95467de,citation,,Face Verification with Disguise Variations via Deep Disguise Recognizer,2018 +121,United States,MsCeleb,msceleb,42.4505507,-76.4783513,Cornell University,edu,9ccf528ef8df99372ce6286ffbb0bf6f9a505cca,citation,,Learning Compositional Visual Concepts with Mutual Consistency,2018 +122,United States,MsCeleb,msceleb,40.3442079,-74.5924599,"Siemens Corporate Research, Princeton, NJ",edu,9ccf528ef8df99372ce6286ffbb0bf6f9a505cca,citation,,Learning Compositional Visual Concepts with Mutual Consistency,2018 +123,United States,MsCeleb,msceleb,42.3383668,-71.08793524,Northeastern University,edu,3827f1cab643a57e3cd22fbffbf19dd5e8a298a8,citation,,One-Shot Face Recognition via Generative Learning,2018 +124,China,MsCeleb,msceleb,39.9106327,116.3356321,Chinese Academy of Science,edu,20f87ed94a423b5d8599d85d1f2f80bab8902107,citation,,Pose-Guided Photorealistic Face Rotation,2018 +125,United States,MsCeleb,msceleb,40.4441619,-79.94272826,Carnegie Mellon University,edu,67a9659de0bf671fafccd7f39b7587f85fb6dfbd,citation,,Ring Loss: Convex Feature Normalization for Face Recognition,2018 diff --git a/site/datasets/verified/mug_faces.csv b/site/datasets/verified/mug_faces.csv new file mode 100644 index 00000000..0ad9226e --- /dev/null +++ b/site/datasets/verified/mug_faces.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,MUG Faces,mug_faces,0.0,0.0,,,,main,,The MUG facial expression database,2010 diff --git a/site/datasets/verified/names_and_faces.csv b/site/datasets/verified/names_and_faces.csv new file mode 100644 index 00000000..56f2a57a --- /dev/null +++ b/site/datasets/verified/names_and_faces.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,News Dataset,names_and_faces,0.0,0.0,,,,main,,Names and faces in the news,2004 diff --git a/site/datasets/verified/orl.csv b/site/datasets/verified/orl.csv new file mode 100644 index 00000000..b9d29530 --- /dev/null +++ b/site/datasets/verified/orl.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,ORL,orl,0.0,0.0,,,,main,,Parameterisation of a stochastic model for human face identification,1994 diff --git a/site/datasets/verified/oxford_town_centre.csv b/site/datasets/verified/oxford_town_centre.csv new file mode 100644 index 00000000..8fb0f336 --- /dev/null +++ b/site/datasets/verified/oxford_town_centre.csv @@ -0,0 +1,114 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,TownCentre,oxford_town_centre,0.0,0.0,,,,main,,Stable multi-target tracking in real-time surveillance video,2011 +1,United States,TownCentre,oxford_town_centre,40.4441619,-79.94272826,Carnegie Mellon University,edu,03ae36b2ed0215b15c5bc7d42fbe20b1491e551a,citation,http://vishnu.boddeti.net/papers/cvpr-2015-abstract.pdf,Learning scene-specific pedestrian detectors without real data,2015 +2,United 
States,TownCentre,oxford_town_centre,37.3675905,-121.9133491,Sony,company,03ae36b2ed0215b15c5bc7d42fbe20b1491e551a,citation,http://vishnu.boddeti.net/papers/cvpr-2015-abstract.pdf,Learning scene-specific pedestrian detectors without real data,2015 +3,United States,TownCentre,oxford_town_centre,42.3504253,-71.10056114,Boston University,edu,9363bf52a5bb2ac94bf247ca56e7cf55fb29ee4e,citation,http://cs-www.bu.edu/groups/ivc/software/TrackerHierarchy/AVSS2012_TrackerHierarchy.pdf,Online Multi-person Tracking by Tracker Hierarchy,2012 +4,United States,TownCentre,oxford_town_centre,28.59899755,-81.19712501,University of Central Florida,edu,80b41fb824f3751b03017bf7ec8c5f71b7e214b2,citation,http://crcv-web.eecs.ucf.edu/papers/cvpr2013/CVPR2013_Yang_FinalVersion_HumanDetection.pdf,Semi-supervised Learning of Feature Hierarchies for Object Detection in a Video,2013 +5,United States,TownCentre,oxford_town_centre,39.2899685,-76.62196103,University of Maryland,edu,2b9410889dc6870cc6e0476dbc681049b28ccacb,citation,http://drum.lib.umd.edu/bitstream/1903/13339/1/CS-TR-5018.pdf,Learning to Detect Carried Objects with Minimal Supervision,2013 +6,United States,TownCentre,oxford_town_centre,28.59899755,-81.19712501,University of Central Florida,edu,5369b021f2abf5daa77fa5602569bb3b8bb18546,citation,http://crcv-web.eecs.ucf.edu/papers/cvpr2015/AfshinDehghan_GMMCP_CVPR15.pdf,GMMCP tracker: Globally optimal Generalized Maximum Multi Clique problem for multiple object tracking,2015 +7,United States,TownCentre,oxford_town_centre,28.59899755,-81.19712501,University of Central Florida,edu,076fd6fd85b93858155a1c775f1897f83d52b4c2,citation,http://crcv-web.eecs.ucf.edu/papers/cvpr2013/CVPR13_final_guang.pdf,Improving an Object Detector and Extracting Regions Using Superpixels,2013 +8,United Kingdom,TownCentre,oxford_town_centre,55.91029135,-3.32345777,Heriot-Watt University,edu,b02581323ad03125e9b18d74ba0c1909d6485dda,citation,https://pure.qub.ac.uk/portal/files/57462725/Anomaly1_s2.0_S0167865513004625_main.pdf,Contextual anomaly detection in crowded surveillance scenes,2014 +9,United Kingdom,TownCentre,oxford_town_centre,51.7534538,-1.25400997,University of Oxford,edu,184c3e66a746376716d5e816d95e1a7cb8e04390,citation,http://ben.benfold.com/docs/benfold_reid_iccv2011-poster.pdf,Unsupervised learning of a scene-specific coarse gaze estimator,2011 +10,United Kingdom,TownCentre,oxford_town_centre,51.7520209,-1.2577263,"Oxford, UK",edu,184c3e66a746376716d5e816d95e1a7cb8e04390,citation,http://ben.benfold.com/docs/benfold_reid_iccv2011-poster.pdf,Unsupervised learning of a scene-specific coarse gaze estimator,2011 +11,Israel,TownCentre,oxford_town_centre,31.262218,34.801461,Ben-Gurion University,edu,880e232f260b0f9d649a4e6408b1cf82f270bd6d,citation,http://www.cs.bgu.ac.il/~ben-shahar/Publications/2013-Ben_Ari_and_Ben_Shahar-A_Computationally_Efficient_Tracker_with_Direct_Appearance-Kinematic_Measure_and_Adaptive%20Kalman_Filter.pdf,A computationally efficient tracker with direct appearance-kinematic measure and adaptive Kalman filter,2013 +12,Israel,TownCentre,oxford_town_centre,31.8878767,34.7359885,"Orbotech Ltd., Yavne, Israel",company,880e232f260b0f9d649a4e6408b1cf82f270bd6d,citation,http://www.cs.bgu.ac.il/~ben-shahar/Publications/2013-Ben_Ari_and_Ben_Shahar-A_Computationally_Efficient_Tracker_with_Direct_Appearance-Kinematic_Measure_and_Adaptive%20Kalman_Filter.pdf,A computationally efficient tracker with direct appearance-kinematic measure and adaptive Kalman filter,2013 
+13,Germany,TownCentre,oxford_town_centre,52.381515,9.720171,Leibniz Universität Hannover,edu,3e0db33884ca8c756b26dc0df85c498c18d5f2ec,citation,http://is.tuebingen.mpg.de/uploads_file/attachment/attachment/137/LeaPonRos11SocialLP.pdf,Exploiting pedestrian interaction via global optimization and social behaviors,2011 +14,United States,TownCentre,oxford_town_centre,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,b28eb219db9370cf20063288225cc2f3e6e5f984,citation,http://faculty.ucmerced.edu/mhyang/papers/iccv15_pose.pdf,Fast and Accurate Head Pose Estimation via Random Projection Forests,2015 +15,United States,TownCentre,oxford_town_centre,37.3641651,-120.4254615,University of California at Merced,edu,b28eb219db9370cf20063288225cc2f3e6e5f984,citation,http://faculty.ucmerced.edu/mhyang/papers/iccv15_pose.pdf,Fast and Accurate Head Pose Estimation via Random Projection Forests,2015 +16,Austria,TownCentre,oxford_town_centre,47.05821,15.46019568,Graz University of Technology,edu,356ec17af375b63a015d590562381a62f352f7d5,citation,http://lrs.icg.tugraz.at/pubs/possegger_cvpr14.pdf,Occlusion Geodesics for Online Multi-object Tracking,2014 +17,United States,TownCentre,oxford_town_centre,45.57022705,-122.63709346,Concordia University,edu,b53289f3f3b17dad91fa4fd25d09fdbc14f8c8cc,citation,http://faculty.ucmerced.edu/mhyang/papers/cviu16_MOT.pdf,Online multi-object tracking via robust collaborative model and sample selection,2017 +18,United States,TownCentre,oxford_town_centre,37.8718992,-122.2585399,University of California,edu,b53289f3f3b17dad91fa4fd25d09fdbc14f8c8cc,citation,http://faculty.ucmerced.edu/mhyang/papers/cviu16_MOT.pdf,Online multi-object tracking via robust collaborative model and sample selection,2017 +19,United States,TownCentre,oxford_town_centre,28.59899755,-81.19712501,University of Central Florida,edu,920246280e7e70900762ddfa7c41a79ec4517350,citation,http://crcv-web.eecs.ucf.edu/papers/eccv2012/MPMPT-ECCV12.pdf,(MP) 2 T: multiple people multiple parts tracker,2012 +20,United States,TownCentre,oxford_town_centre,37.8718992,-122.2585399,University of California,edu,14d5bd23667db4413a7f362565be21d462d3fc93,citation,http://alumni.cs.ucr.edu/~zqin001/cvpr2014.pdf,An Online Learned Elementary Grouping Model for Multi-target Tracking,2014 +21,Germany,TownCentre,oxford_town_centre,52.381515,9.720171,Leibniz Universität Hannover,edu,9070045c1a9564a5f25b42f3facc7edf4c302483,citation,http://virtualhumans.mpi-inf.mpg.de/papers/lealPonsmollICCVW2011/lealPonsmollICCVW2011.pdf,Everybody needs somebody: Modeling social and grouping behavior on a linear programming multiple people tracker,2011 +22,Singapore,TownCentre,oxford_town_centre,1.3484104,103.68297965,Nanyang Technological University,edu,2323cb559c9e18673db836ffc283c27e4a002ed9,citation,http://arxiv.org/pdf/1605.04502v1.pdf,Joint Learning of Convolutional Neural Networks and Temporally Constrained Metrics for Tracklet Association,2016 +23,China,TownCentre,oxford_town_centre,39.905838,116.375516,"Huawei Technologies, Beijing, China",company,434627a03d4433b0df03058724524c3ac1c07478,citation,http://jianghz.com/pubs/mtt_tip_final.pdf,Online Multi-Target Tracking With Unified Handling of Complex Scenarios,2015 +24,China,TownCentre,oxford_town_centre,34.250803,108.983693,Xi’an Jiaotong University,edu,434627a03d4433b0df03058724524c3ac1c07478,citation,http://jianghz.com/pubs/mtt_tip_final.pdf,Online Multi-Target Tracking With Unified Handling of Complex Scenarios,2015 +25,United 
States,TownCentre,oxford_town_centre,28.59899755,-81.19712501,University of Central Florida,edu,084352b63e98d3b3310521fb3bda8cb4a77a0254,citation,http://crcv.ucf.edu/papers/1439.pdf,Part-based multiple-person tracking with partial occlusion handling,2012 +26,United States,TownCentre,oxford_town_centre,39.5469449,-119.81346566,University of Nevada,edu,084352b63e98d3b3310521fb3bda8cb4a77a0254,citation,http://crcv.ucf.edu/papers/1439.pdf,Part-based multiple-person tracking with partial occlusion handling,2012 +27,United Kingdom,TownCentre,oxford_town_centre,55.7782474,-4.1040988,University of the West of Scotland,edu,32b9be86de4f82c5a43da2a1a0a892515da8910d,citation,http://users.informatik.haw-hamburg.de/~ubicomp/arbeiten/papers/ICISP2014.pdf,Robust False Positive Detection for Real-Time Multi-target Tracking,2014 +28,Italy,TownCentre,oxford_town_centre,43.7776426,11.259765,"Università degli Studi di Firenze, Firenze",edu,2914a20df10f3bb55c5d4764ece85101c1a3e5a8,citation,http://www.micc.unifi.it/seidenari/wp-content/papercite-data/pdf/icpr_16.pdf,User interest profiling using tracking-free coarse gaze estimation,2016 +29,United States,TownCentre,oxford_town_centre,40.4441619,-79.94272826,Carnegie Mellon University,edu,1f4fed0183048d9014e22a72fd50e1e5fbe0777c,citation,https://pdfs.semanticscholar.org/6b7b/1760ed23ef15ec210b2d6795fdf9ad36d0e2.pdf,A Game-Theoretic Approach to Multi-Pedestrian Activity Forecasting,2016 +30,United States,TownCentre,oxford_town_centre,37.43131385,-122.16936535,Stanford University,edu,1f4fed0183048d9014e22a72fd50e1e5fbe0777c,citation,https://pdfs.semanticscholar.org/6b7b/1760ed23ef15ec210b2d6795fdf9ad36d0e2.pdf,A Game-Theoretic Approach to Multi-Pedestrian Activity Forecasting,2016 +31,United States,TownCentre,oxford_town_centre,42.3354481,-71.16813864,Boston College,edu,869df5e8221129850e81e77d4dc36e6c0f854fe6,citation,https://arxiv.org/pdf/1601.03094.pdf,A metric for sets of trajectories that is practical and mathematically consistent,2016 +32,United States,TownCentre,oxford_town_centre,34.1579742,-118.2894729,Disney Research,company,d8bc2e2537cecbe6e751d4791837251a249cd06d,citation,http://www.cse.psu.edu/~rtc12/Papers/wacv2016CarrCollins.pdf,Assessing tracking performance in complex scenarios using mean time between failures,2016 +33,United States,TownCentre,oxford_town_centre,40.7982133,-77.8599084,The Pennsylvania State University,edu,d8bc2e2537cecbe6e751d4791837251a249cd06d,citation,http://www.cse.psu.edu/~rtc12/Papers/wacv2016CarrCollins.pdf,Assessing tracking performance in complex scenarios using mean time between failures,2016 +34,United States,TownCentre,oxford_town_centre,28.59899755,-81.19712501,University of Central Florida,edu,2dfba157e0b5db5becb99b3c412ac729cf3bb32d,citation,https://pdfs.semanticscholar.org/7fb2/f6ce372db950f26f9395721651d6c6aa7b76.pdf,Automatic Detection and Tracking of Pedestrians in Videos with Various Crowd Densities,2012 +35,India,TownCentre,oxford_town_centre,12.9914929,80.2336907,"IIT Madras, India",edu,37f2e03c7cbec9ffc35eac51578e7e8fdfee3d4e,citation,http://www.cse.iitm.ac.in/~amittal/wacv2015_review.pdf,Co-operative Pedestrians Group Tracking in Crowded Scenes Using an MST Approach,2015 +36,United Kingdom,TownCentre,oxford_town_centre,55.91029135,-3.32345777,Heriot-Watt University,edu,b8af24279c58a718091817236f878c805a7843e1,citation,https://pdfs.semanticscholar.org/b8af/24279c58a718091817236f878c805a7843e1.pdf,Context Aware Anomalous Behaviour Detection in Crowded Surveillance,2013 
+37,Russia,TownCentre,oxford_town_centre,55.8067104,37.5416381,"Faculty of Computer Science, Moscow, Russia",edu,224547337e1ace6411a69c2e06ce538bc67923f7,citation,https://pdfs.semanticscholar.org/2245/47337e1ace6411a69c2e06ce538bc67923f7.pdf,Convolutional Neural Network for Camera Pose Estimation from Object Detections,2017 +38,Germany,TownCentre,oxford_town_centre,48.7468939,9.0805141,Max Planck Institute for Intelligent Systems,edu,b6d0e461535116a675a0354e7da65b2c1d2958d4,citation,https://arxiv.org/pdf/1805.03430.pdf,Deep Directional Statistics: Pose Estimation with Uncertainty Quantification,2018 +39,United States,TownCentre,oxford_town_centre,38.7768106,-94.9442982,Amazon,company,b6d0e461535116a675a0354e7da65b2c1d2958d4,citation,https://arxiv.org/pdf/1805.03430.pdf,Deep Directional Statistics: Pose Estimation with Uncertainty Quantification,2018 +40,United States,TownCentre,oxford_town_centre,47.6423318,-122.1369302,Microsoft,company,b6d0e461535116a675a0354e7da65b2c1d2958d4,citation,https://arxiv.org/pdf/1805.03430.pdf,Deep Directional Statistics: Pose Estimation with Uncertainty Quantification,2018 +41,United Kingdom,TownCentre,oxford_town_centre,55.91029135,-3.32345777,Heriot-Watt University,edu,70be5432677c0fbe000ac0c28dda351a950e0536,citation,http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2014/W14/papers/Leach_Detecting_Social_Groups_2014_CVPR_paper.pdf,Detecting Social Groups in Crowded Surveillance Videos Using Visual Attention,2014 +42,Switzerland,TownCentre,oxford_town_centre,47.376313,8.5476699,ETH Zurich,edu,9458642e7645bfd865911140ee8413e2f5f9fcd6,citation,https://pdfs.semanticscholar.org/9458/642e7645bfd865911140ee8413e2f5f9fcd6.pdf,Efficient Multiple People Tracking Using Minimum Cost Arborescences,2014 +43,United Kingdom,TownCentre,oxford_town_centre,54.6141723,-5.9002151,Queen's University Belfast,edu,2a7935706d43c01789d43a81a1d391418f220a0a,citation,https://pure.qub.ac.uk/portal/files/31960902/285.pdf,Enhancing Linear Programming with Motion Modeling for Multi-target Tracking,2015 +44,Sri Lanka,TownCentre,oxford_town_centre,6.7970862,79.9019094,University of Moratuwa,edu,b183914d0b16647a41f0bfd4af64bf94a83a2b14,citation,http://iwinlab.eng.usf.edu/papers/Extensible%20video%20surveillance%20software%20with%20simultaneous%20event%20detection%20for%20low%20and%20high%20density%20crowd%20analysis.pdf,Extensible video surveillance software with simultaneous event detection for low and high density crowd analysis,2014 +45,United States,TownCentre,oxford_town_centre,33.5866784,-101.87539204,Electrical and Computer Engineering,edu,fa5aca45965e312362d2d75a69312a0678fdf5d7,citation,https://pdfs.semanticscholar.org/fa5a/ca45965e312362d2d75a69312a0678fdf5d7.pdf,Fast and Accurate Head Pose Estimation via Random Projection Forests : Supplementary Material,2015 +46,United States,TownCentre,oxford_town_centre,37.3641651,-120.4254615,University of California at Merced,edu,fa5aca45965e312362d2d75a69312a0678fdf5d7,citation,https://pdfs.semanticscholar.org/fa5a/ca45965e312362d2d75a69312a0678fdf5d7.pdf,Fast and Accurate Head Pose Estimation via Random Projection Forests : Supplementary Material,2015 +47,Australia,TownCentre,oxford_town_centre,-32.8892352,151.6998983,"University of Newcastle, Australia",edu,2feb7c57d51df998aafa6f3017662263a91625b4,citation,https://pdfs.semanticscholar.org/d344/9eaaf392fd07b676e744410049f4095b4b5c.pdf,Feature Selection for Intelligent Transportation Systems,2014 
+48,Germany,TownCentre,oxford_town_centre,49.01546,8.4257999,Fraunhofer,company,1f82eebadc3ffa41820ad1a0f53770247fc96dcd,citation,https://pdfs.semanticscholar.org/c5ac/81b17b8fcc028f375fbbd090b558ba9a437a.pdf,Using Trajectories derived by Dense Optical Flows as a Spatial Component in Background Subtraction,2016 +49,United States,TownCentre,oxford_town_centre,42.3583961,-71.09567788,MIT,edu,b18f94c5296a9cebe9e779d50d193fd180f78ed9,citation,https://arxiv.org/pdf/1604.01431.pdf,Forecasting Interactive Dynamics of Pedestrians with Fictitious Play,2017 +50,United Kingdom,TownCentre,oxford_town_centre,51.7520849,-1.2516646,Oxford University,edu,b18f94c5296a9cebe9e779d50d193fd180f78ed9,citation,https://arxiv.org/pdf/1604.01431.pdf,Forecasting Interactive Dynamics of Pedestrians with Fictitious Play,2017 +51,United States,TownCentre,oxford_town_centre,37.43131385,-122.16936535,Stanford University,edu,b18f94c5296a9cebe9e779d50d193fd180f78ed9,citation,https://arxiv.org/pdf/1604.01431.pdf,Forecasting Interactive Dynamics of Pedestrians with Fictitious Play,2017 +52,Netherlands,TownCentre,oxford_town_centre,52.3553655,4.9501644,University of Amsterdam,edu,687ec23addf5a1279e49cc46b78e3245af94ac7b,citation,https://pdfs.semanticscholar.org/687e/c23addf5a1279e49cc46b78e3245af94ac7b.pdf,UvA-DARE ( Digital Academic Repository ) Visual Tracking : An Experimental Survey Smeulders,2013 +53,Italy,TownCentre,oxford_town_centre,45.1847248,9.1582069,"Italian Institute of Technology, Genova, Italy",edu,5ab9f00a707a55f4955b378981ad425aa1cb8ea3,citation,https://arxiv.org/pdf/1901.02000.pdf,Forecasting People Trajectories and Head Poses by Jointly Reasoning on Tracklets and Vislets,2019 +54,Germany,TownCentre,oxford_town_centre,48.1820038,11.5978282,"OSRAM GmbH, Germany",company,5ab9f00a707a55f4955b378981ad425aa1cb8ea3,citation,https://arxiv.org/pdf/1901.02000.pdf,Forecasting People Trajectories and Head Poses by Jointly Reasoning on Tracklets and Vislets,2019 +55,Italy,TownCentre,oxford_town_centre,45.437398,11.003376,University of Verona,edu,5ab9f00a707a55f4955b378981ad425aa1cb8ea3,citation,https://arxiv.org/pdf/1901.02000.pdf,Forecasting People Trajectories and Head Poses by Jointly Reasoning on Tracklets and Vislets,2019 +56,United Kingdom,TownCentre,oxford_town_centre,51.7534538,-1.25400997,University of Oxford,edu,3ed9730e5ec8716e8cdf55f207ef973a9c854574,citation,https://arxiv.org/pdf/1612.05234.pdf,Visual Compiler: Synthesizing a Scene-Specific Pedestrian Detector and Pose Estimator,2016 +57,United States,TownCentre,oxford_town_centre,29.7207902,-95.34406271,University of Houston,edu,58eba9930b63cc14715368acf40017293b8dc94f,citation,https://pdfs.semanticscholar.org/7508/ac08dd7b9694bcfe71a617df7fcf3df80952.pdf,What Do I See? 
Modeling Human Visual Perception for Multi-person Tracking,2014 +58,United States,TownCentre,oxford_town_centre,29.7207902,-95.34406271,University of Houston,edu,a0b489eeb4f7fd2249da756d829e179a6718d9d1,citation,,"""Seeing is Believing"": Pedestrian Trajectory Forecasting Using Visual Frustum of Attention",2018 +59,Belgium,TownCentre,oxford_town_centre,50.8779545,4.7002953,"KULeuven, EAVISE",edu,4ec4392246a7760d189cd6ea48a81664cd2fe4bf,citation,https://pdfs.semanticscholar.org/4ec4/392246a7760d189cd6ea48a81664cd2fe4bf.pdf,GPU Accelerated ACF Detector,2018 +60,United States,TownCentre,oxford_town_centre,40.7982133,-77.8599084,The Pennsylvania State University,edu,6e32c368a6157fb911c9363dc3e967a7fb2ad9f7,citation,https://pdfs.semanticscholar.org/8268/d68f6aa510a765466b2c7f2ba2ea34a48c51.pdf,Hybrid Stochastic / Deterministic Optimization for Tracking Sports Players and Pedestrians,2014 +61,United States,TownCentre,oxford_town_centre,40.4439789,-79.9464634,Disney Research Pittsburgh,edu,6e32c368a6157fb911c9363dc3e967a7fb2ad9f7,citation,https://pdfs.semanticscholar.org/8268/d68f6aa510a765466b2c7f2ba2ea34a48c51.pdf,Hybrid Stochastic / Deterministic Optimization for Tracking Sports Players and Pedestrians,2014 +62,India,TownCentre,oxford_town_centre,13.0304619,77.5646862,"M.S. Ramaiah Institute of Technology, Bangalore, India",edu,6f089f9959cc711e16f1ebe0c6251aaf8a65959a,citation,https://pdfs.semanticscholar.org/6f08/9f9959cc711e16f1ebe0c6251aaf8a65959a.pdf,Improvement in object detection using Super Pixels,2016 +63,United States,TownCentre,oxford_town_centre,38.99203005,-76.9461029,University of Maryland College Park,edu,4e82908e6482d973c280deb79c254631a60f1631,citation,https://pdfs.semanticscholar.org/4e82/908e6482d973c280deb79c254631a60f1631.pdf,Improving Efficiency and Scalability in Visual Surveillance Applications,2013 +64,United States,TownCentre,oxford_town_centre,37.8718992,-122.2585399,University of California,edu,38b5a83f7941fea5fd82466f8ce1ce4ed7749f59,citation,http://rlair.cs.ucr.edu/papers/docs/grouptracking.pdf,Improving multi-target tracking via social grouping,2012 +65,Singapore,TownCentre,oxford_town_centre,1.3484104,103.68297965,Nanyang Technological University,edu,13caf4d2e0a4b6fcfcd4b9e8e2341b8ebd38258d,citation,https://arxiv.org/pdf/1605.04502.pdf,Joint Learning of Siamese CNNs and Temporally Constrained Metrics for Tracklet Association,2016 +66,United States,TownCentre,oxford_town_centre,35.9049122,-79.0469134,The University of North Carolina at Chapel Hill,edu,45e459462a80af03e1bb51a178648c10c4250925,citation,https://arxiv.org/pdf/1606.08998.pdf,LCrowdV: Generating Labeled Videos for Simulation-based Crowd Behavior Learning,2016 +67,China,TownCentre,oxford_town_centre,30.5097537,114.4062881,Huazhong University of Science and Technology,edu,c0262e24324a6a4e6af5bd99fc79e2eb802519b3,citation,https://arxiv.org/pdf/1611.03968.pdf,Learning Scene-specific Object Detectors Based on a Generative-Discriminative Model with Minimal Supervision,2016 +68,China,TownCentre,oxford_town_centre,30.527151,114.400762,China University of Geosciences,edu,c0262e24324a6a4e6af5bd99fc79e2eb802519b3,citation,https://arxiv.org/pdf/1611.03968.pdf,Learning Scene-specific Object Detectors Based on a Generative-Discriminative Model with Minimal Supervision,2016 +69,China,TownCentre,oxford_town_centre,32.0565957,118.77408833,Nanjing University,edu,c0262e24324a6a4e6af5bd99fc79e2eb802519b3,citation,https://arxiv.org/pdf/1611.03968.pdf,Learning Scene-specific Object Detectors Based on a 
Generative-Discriminative Model with Minimal Supervision,2016 +70,United Kingdom,TownCentre,oxford_town_centre,51.5247272,-0.03931035,Queen Mary University of London,edu,1883387726897d94b663cc4de4df88e5c31df285,citation,http://www.eecs.qmul.ac.uk/~andrea/papers/2014_TIP_MultiTargetTrackingEvaluation_Tahir_Poiesi_Cavallaro.pdf,Measures of Effective Video Tracking,2014 +71,United States,TownCentre,oxford_town_centre,35.9113971,-79.0504529,University of North Carolina at Chapel Hill,edu,8d2bf6ecbfda94f57000b84509bf77f4c47c1c66,citation,https://arxiv.org/pdf/1707.09100.pdf,MixedPeds: Pedestrian Detection in Unannotated Videos Using Synthetically Generated Human-Agents for Training,2018 +72,United States,TownCentre,oxford_town_centre,37.8718992,-122.2585399,University of California,edu,b506aa23949b6d1f0c868ad03aaaeb5e5f7f6b57,citation,http://rlair.cs.ucr.edu/papers/docs/zqin-phd.pdf,Modeling Social and Temporal Context for Video Analysis,2015 +73,Australia,TownCentre,oxford_town_centre,-34.920603,138.6062277,Adelaide University,edu,5bae9822d703c585a61575dced83fa2f4dea1c6d,citation,https://arxiv.org/pdf/1504.01942.pdf,MOTChallenge 2015: Towards a Benchmark for Multi-Target Tracking,2015 +74,Switzerland,TownCentre,oxford_town_centre,47.376313,8.5476699,ETH Zurich,edu,5bae9822d703c585a61575dced83fa2f4dea1c6d,citation,https://arxiv.org/pdf/1504.01942.pdf,MOTChallenge 2015: Towards a Benchmark for Multi-Target Tracking,2015 +75,Germany,TownCentre,oxford_town_centre,49.8748277,8.6563281,TU Darmstadt,edu,5bae9822d703c585a61575dced83fa2f4dea1c6d,citation,https://arxiv.org/pdf/1504.01942.pdf,MOTChallenge 2015: Towards a Benchmark for Multi-Target Tracking,2015 +76,United States,TownCentre,oxford_town_centre,37.8718992,-122.2585399,University of California,edu,e6d48d23308a9e0a215f7b5ba6ae30ee5d2f0ef5,citation,https://pdfs.semanticscholar.org/e6d4/8d23308a9e0a215f7b5ba6ae30ee5d2f0ef5.pdf,Multi-person Tracking by Online Learned Grouping Model with Non-linear Motion Context,2015 +77,France,TownCentre,oxford_town_centre,45.217886,5.807369,INRIA,edu,fc30d7dbf4c3cdd377d8cd4e7eeabd5d73814b8f,citation,https://pdfs.semanticscholar.org/fc30/d7dbf4c3cdd377d8cd4e7eeabd5d73814b8f.pdf,Multiple Object Tracking by Efficient Graph Partitioning,2014 +78,Germany,TownCentre,oxford_town_centre,52.381515,9.720171,Leibniz Universität Hannover,edu,290eda31bc13cbd5933acec8b6a25b3e3761c788,citation,https://arxiv.org/pdf/1411.7935.pdf,Multiple object tracking with context awareness,2014 +79,Czech Republic,TownCentre,oxford_town_centre,49.20172,16.6033168,Brno University of Technology,edu,dc53c4bb04e787a0d45dd761ba2101cc51c17b82,citation,https://pdfs.semanticscholar.org/dc53/c4bb04e787a0d45dd761ba2101cc51c17b82.pdf,Multiple-Person Tracking by Detection,2016 +80,Germany,TownCentre,oxford_town_centre,48.1820038,11.5978282,"OSRAM GmbH, Germany",company,943b1b92b5bdee0b5770418c645a4a17bded1ccf,citation,https://arxiv.org/pdf/1805.00652.pdf,MX-LSTM: Mixing Tracklets and Vislets to Jointly Forecast Trajectories and Head Poses,2018 +81,Italy,TownCentre,oxford_town_centre,45.437398,11.003376,University of Verona,edu,943b1b92b5bdee0b5770418c645a4a17bded1ccf,citation,https://arxiv.org/pdf/1805.00652.pdf,MX-LSTM: Mixing Tracklets and Vislets to Jointly Forecast Trajectories and Head Poses,2018 +82,France,TownCentre,oxford_town_centre,48.8422058,2.3451689,"INRIA / Ecole Normale Supérieure, France",edu,47119c99f5aa1e47bbeb86de0f955e7c500e6a93,citation,https://arxiv.org/pdf/1408.3304.pdf,On pairwise costs for network flow multi-object 
tracking,2015 +83,United States,TownCentre,oxford_town_centre,42.3504253,-71.10056114,Boston University,edu,1ae3dd081b93c46cda4d72100d8b1d59eb585157,citation,https://pdfs.semanticscholar.org/fea1/0f39b0a77035fb549fc580fd951384b79f9b.pdf,Online Motion Agreement Tracking,2013 +84,Malaysia,TownCentre,oxford_town_centre,4.3400673,101.1429799,Universiti Tunku Abdul Rahman,edu,e1f815c50a6c0c6d790c60a1348393264f829e60,citation,https://pdfs.semanticscholar.org/e1f8/15c50a6c0c6d790c60a1348393264f829e60.pdf,PEDESTRIAN DETECTION AND TRACKING IN SURVEILLANCE VIDEO By PENNY CHONG,2016 +85,Germany,TownCentre,oxford_town_centre,52.381515,9.720171,Leibniz Universität Hannover,edu,422d352a7d26fef692a3cd24466bfb5b4526efea,citation,https://pdfs.semanticscholar.org/422d/352a7d26fef692a3cd24466bfb5b4526efea.pdf,Pedestrian interaction in tracking : the social force model and global optimization methods,2012 +86,Sweden,TownCentre,oxford_town_centre,57.6897063,11.9741654,Chalmers University of Technology,edu,367b5b814aa991329c2ae7f8793909ad8c0a56f1,citation,https://arxiv.org/pdf/1211.0191.pdf,Performance evaluation of random set based pedestrian tracking algorithms,2013 +87,Japan,TownCentre,oxford_town_centre,35.5152072,134.1733553,Tottori University,edu,9d89f1bc88fd65e90b31a2129719384796bed17a,citation,http://vision.unipv.it/CV/materiale2016-17/2nd%20Choice/0225.pdf,Person re-identification using co-occurrence attributes of physical and adhered human characteristics,2016 +88,Germany,TownCentre,oxford_town_centre,52.381515,9.720171,Leibniz Universität Hannover,edu,48705017d91a157949cfaaeb19b826014899a36b,citation,https://pdfs.semanticscholar.org/4870/5017d91a157949cfaaeb19b826014899a36b.pdf,PROBABILISTIC MULTI-PERSON TRACKING USING DYNAMIC BAYES NETWORKS,2015 +89,Italy,TownCentre,oxford_town_centre,39.2173657,9.1149218,"Università degli Studi di Cagliari, Italy",edu,7c1f47ca50a8a55f93bf69791d9df2f994019758,citation,http://veprints.unica.it/1295/1/PhD_ThesisPalaF.pdf,Re-identification and semantic retrieval of pedestrians in video surveillance scenarios,2016 +90,United Kingdom,TownCentre,oxford_town_centre,51.5247272,-0.03931035,Queen Mary University of London,edu,3a28059df29b74775f77fd20a15dc6b5fe857556,citation,https://pdfs.semanticscholar.org/3a28/059df29b74775f77fd20a15dc6b5fe857556.pdf,Riccardo Mazzon PhD Thesis 2013,2013 +91,Brazil,TownCentre,oxford_town_centre,-30.0338248,-51.218828,Federal University of Rio Grande do Sul,edu,057517452369751bd63d83902ea91558d58161da,citation,http://inf.ufrgs.br/~gfuhr/papers/102095_3.pdf,Robust Patch-Based Pedestrian Tracking Using Monocular Calibrated Cameras,2012 +92,China,TownCentre,oxford_town_centre,28.727339,115.816633,Jiangxi University of Finance and Economics,edu,1642358cd9410abe9ee512d34ba68296b308770e,citation,https://arxiv.org/pdf/1807.04562.pdf,Robustness Analysis of Pedestrian Detectors for Surveillance,2018 +93,Singapore,TownCentre,oxford_town_centre,1.3484104,103.68297965,Nanyang Technological University,edu,1642358cd9410abe9ee512d34ba68296b308770e,citation,https://arxiv.org/pdf/1807.04562.pdf,Robustness Analysis of Pedestrian Detectors for Surveillance,2018 +94,China,TownCentre,oxford_town_centre,34.250803,108.983693,Xi’an Jiaotong University,edu,1642358cd9410abe9ee512d34ba68296b308770e,citation,https://arxiv.org/pdf/1807.04562.pdf,Robustness Analysis of Pedestrian Detectors for Surveillance,2018 +95,Singapore,TownCentre,oxford_town_centre,1.3484104,103.68297965,Nanyang Technological 
University,edu,7c132e0a2b7e13c78784287af38ad74378da31e5,citation,https://pdfs.semanticscholar.org/7c13/2e0a2b7e13c78784287af38ad74378da31e5.pdf,Salient Parts based Multi-people Tracking,2015 +96,China,TownCentre,oxford_town_centre,40.0044795,116.370238,Chinese Academy of Sciences,edu,679136c2844eeddca34e98e483aca1ff6ef5e902,citation,https://arxiv.org/pdf/1712.08745.pdf,Scene-Specific Pedestrian Detection Based on Parallel Vision,2017 +97,China,TownCentre,oxford_town_centre,34.250803,108.983693,Xi’an Jiaotong University,edu,679136c2844eeddca34e98e483aca1ff6ef5e902,citation,https://arxiv.org/pdf/1712.08745.pdf,Scene-Specific Pedestrian Detection Based on Parallel Vision,2017 +98,China,TownCentre,oxford_town_centre,40.0044795,116.370238,Chinese Academy of Sciences,edu,57e9b0d3ab6295e914d5a30cfaa3b2c81189abc1,citation,https://arxiv.org/pdf/1611.07544.pdf,Self-Learning Scene-Specific Pedestrian Detectors Using a Progressive Latent Model,2017 +99,United States,TownCentre,oxford_town_centre,35.9990522,-78.9290629,Duke University,edu,57e9b0d3ab6295e914d5a30cfaa3b2c81189abc1,citation,https://arxiv.org/pdf/1611.07544.pdf,Self-Learning Scene-Specific Pedestrian Detectors Using a Progressive Latent Model,2017 +100,Switzerland,TownCentre,oxford_town_centre,47.3764534,8.54770931,ETH Zürich,edu,70b42bbd76e6312d39ea06b8a0c24beb4a93e022,citation,http://www.tnt.uni-hannover.de/papers/data/1075/WACV2015_Abstract.pdf,Solving Multiple People Tracking in a Minimum Cost Arborescence,2015 +101,United States,TownCentre,oxford_town_centre,42.718568,-84.47791571,Michigan State University,edu,acf0db156406ddad1ace2ff2696cb60d0a04cf7c,citation,http://hal.cse.msu.edu/assets/pdfs/papers/2018-ijcv-visual-compiler.pdf,Synthesizing a Scene-Specific Pedestrian Detector and Pose Estimator for Static Video Surveillance,2018 +102,United Kingdom,TownCentre,oxford_town_centre,51.7534538,-1.25400997,University of Oxford,edu,acf0db156406ddad1ace2ff2696cb60d0a04cf7c,citation,http://hal.cse.msu.edu/assets/pdfs/papers/2018-ijcv-visual-compiler.pdf,Synthesizing a Scene-Specific Pedestrian Detector and Pose Estimator for Static Video Surveillance,2018 +103,Japan,TownCentre,oxford_town_centre,36.05238585,140.11852361,Institute of Industrial Science,edu,acf0db156406ddad1ace2ff2696cb60d0a04cf7c,citation,http://hal.cse.msu.edu/assets/pdfs/papers/2018-ijcv-visual-compiler.pdf,Synthesizing a Scene-Specific Pedestrian Detector and Pose Estimator for Static Video Surveillance,2018 +104,United States,TownCentre,oxford_town_centre,40.4441619,-79.94272826,Carnegie Mellon University,edu,acf0db156406ddad1ace2ff2696cb60d0a04cf7c,citation,http://hal.cse.msu.edu/assets/pdfs/papers/2018-ijcv-visual-compiler.pdf,Synthesizing a Scene-Specific Pedestrian Detector and Pose Estimator for Static Video Surveillance,2018 +105,Sweden,TownCentre,oxford_town_centre,57.7172004,11.9218558,"Volvo Construction Equipment, Göthenburg, Sweden",company,acf0db156406ddad1ace2ff2696cb60d0a04cf7c,citation,http://hal.cse.msu.edu/assets/pdfs/papers/2018-ijcv-visual-compiler.pdf,Synthesizing a Scene-Specific Pedestrian Detector and Pose Estimator for Static Video Surveillance,2018 +106,United States,TownCentre,oxford_town_centre,35.9990522,-78.9290629,Duke University,edu,64e0690dd176a93de9d4328f6e31fc4afe1e7536,citation,https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf,Tracking Multiple People Online and in Real Time,2014 +107,Switzerland,TownCentre,oxford_town_centre,47.3764534,8.54770931,ETH 
Zürich,edu,64c78c8bf779a27e819fd9d5dba91247ab5a902b,citation,https://arxiv.org/pdf/1607.07304.pdf,Tracking with multi-level features.,2016 +108,Germany,TownCentre,oxford_town_centre,52.381515,9.720171,Leibniz Universität Hannover,edu,64c78c8bf779a27e819fd9d5dba91247ab5a902b,citation,https://arxiv.org/pdf/1607.07304.pdf,Tracking with multi-level features.,2016 +109,Germany,TownCentre,oxford_town_centre,48.14955455,11.56775314,Technical University Munich,edu,64c78c8bf779a27e819fd9d5dba91247ab5a902b,citation,https://arxiv.org/pdf/1607.07304.pdf,Tracking with multi-level features.,2016 +110,Singapore,TownCentre,oxford_town_centre,1.3484104,103.68297965,Nanyang Technological University,edu,7d3698c0e828d05f147682b0f5bfcd3b681ff205,citation,https://arxiv.org/pdf/1511.06654.pdf,Tracklet Association by Online Target-Specific Metric Learning and Coherent Dynamics Estimation,2017 +111,Australia,TownCentre,oxford_town_centre,-35.2809368,149.1300092,"NICTA, Canberra",edu,f0cc615b14c97482faa9c47eb855303c71ff03a7,citation,https://pdfs.semanticscholar.org/f0cc/615b14c97482faa9c47eb855303c71ff03a7.pdf,Tracklet clustering for robust multiple object tracking using distance dependent Chinese restaurant processes,2016 +112,Germany,TownCentre,oxford_town_centre,52.5180641,13.3250425,TU Berlin,edu,c4cd19cf41a2f5cd543d81b94afe6cc42785920a,citation,http://elvera.nue.tu-berlin.de/files/1491Bochinski2016.pdf,Training a convolutional neural network for multi-class object detection using solely virtual world data,2016 diff --git a/site/datasets/verified/pa_100k.csv b/site/datasets/verified/pa_100k.csv new file mode 100644 index 00000000..b79ce7f3 --- /dev/null +++ b/site/datasets/verified/pa_100k.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,PA-100K,pa_100k,0.0,0.0,,,,main,,HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis,2017 diff --git a/site/datasets/verified/penn_fudan.csv b/site/datasets/verified/penn_fudan.csv new file mode 100644 index 00000000..10427ed0 --- /dev/null +++ b/site/datasets/verified/penn_fudan.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,Penn Fudan,penn_fudan,0.0,0.0,,,,main,,Object Detection Combining Recognition and Segmentation,2007 diff --git a/site/datasets/verified/peta.csv b/site/datasets/verified/peta.csv new file mode 100644 index 00000000..e999095c --- /dev/null +++ b/site/datasets/verified/peta.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,PETA,peta,0.0,0.0,,,,main,,Pedestrian Attribute Recognition At Far Distance,2014 diff --git a/site/datasets/verified/pilot_parliament.csv b/site/datasets/verified/pilot_parliament.csv new file mode 100644 index 00000000..45279348 --- /dev/null +++ b/site/datasets/verified/pilot_parliament.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,PPB,pilot_parliament,0.0,0.0,,,,main,,Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification,2018 diff --git a/site/datasets/verified/pipa.csv b/site/datasets/verified/pipa.csv new file mode 100644 index 00000000..3acdccff --- /dev/null +++ b/site/datasets/verified/pipa.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,PIPA,pipa,0.0,0.0,,,,main,,Beyond frontal faces: Improving Person Recognition using multiple cues,2015 diff --git 
a/site/datasets/verified/pku_reid.csv b/site/datasets/verified/pku_reid.csv new file mode 100644 index 00000000..46dea72b --- /dev/null +++ b/site/datasets/verified/pku_reid.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,PKU-Reid,pku_reid,0.0,0.0,,,,main,,Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification,2015 diff --git a/site/datasets/verified/prid.csv b/site/datasets/verified/prid.csv new file mode 100644 index 00000000..622bae62 --- /dev/null +++ b/site/datasets/verified/prid.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,PRID,prid,0.0,0.0,,,,main,,Person Re-identification by Descriptive and Discriminative Classification,2011 diff --git a/site/datasets/verified/pubfig.csv b/site/datasets/verified/pubfig.csv new file mode 100644 index 00000000..5152566a --- /dev/null +++ b/site/datasets/verified/pubfig.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,PubFig,pubfig,0.0,0.0,,,,main,,Attribute and simile classifiers for face verification,2009 diff --git a/site/datasets/verified/pubfig_83.csv b/site/datasets/verified/pubfig_83.csv new file mode 100644 index 00000000..9385e8cd --- /dev/null +++ b/site/datasets/verified/pubfig_83.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,pubfig83,pubfig_83,0.0,0.0,,,,main,,Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook,2011 diff --git a/site/datasets/verified/social_relation.csv b/site/datasets/verified/social_relation.csv new file mode 100644 index 00000000..eb7f473e --- /dev/null +++ b/site/datasets/verified/social_relation.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,Social Relation,social_relation,0.0,0.0,,,,main,,Learning Social Relation Traits from Face Images,2015 diff --git a/site/datasets/verified/tisi.csv b/site/datasets/verified/tisi.csv new file mode 100644 index 00000000..80f164c4 --- /dev/null +++ b/site/datasets/verified/tisi.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,Times Square Intersection,tisi,0.0,0.0,,,,main,,Video Synopsis by Heterogeneous Multi-source Correlation,2013 diff --git a/site/datasets/verified/uccs.csv b/site/datasets/verified/uccs.csv new file mode 100644 index 00000000..d7c84820 --- /dev/null +++ b/site/datasets/verified/uccs.csv @@ -0,0 +1,9 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,UCCS,uccs,0.0,0.0,,,,main,,Large scale unconstrained open set face database,2013 +1,United States,UCCS,uccs,41.70456775,-86.23822026,University of Notre Dame,edu,841855205818d3a6d6f85ec17a22515f4f062882,citation,https://arxiv.org/pdf/1805.11529.pdf,Low Resolution Face Recognition in the Wild,2018 +2,United States,UCCS,uccs,40.11571585,-88.22750772,Beckman Institute,edu,288d2704205d9ca68660b9f3a8fda17e18329c13,citation,https://arxiv.org/pdf/1601.04153.pdf,Studying Very Low Resolution Recognition Using Deep Networks,2016 +3,United States,UCCS,uccs,38.8920756,-104.79716389,"University of Colorado, Colorado Springs",edu,d4f1eb008eb80595bcfdac368e23ae9754e1e745,citation,,Unconstrained Face Detection and Open-Set Face Recognition Challenge,2017 +4,United Kingdom,UCCS,uccs,51.5247272,-0.03931035,Queen 
Mary University of London,edu,2306b2a8fba28539306052764a77a0d0f5d1236a,citation,https://arxiv.org/pdf/1804.09691.pdf,Surveillance Face Recognition Challenge,2018 +5,United Kingdom,UCCS,uccs,55.378051,-3.435973,"Vision Semantics Ltd, UK",edu,2306b2a8fba28539306052764a77a0d0f5d1236a,citation,https://arxiv.org/pdf/1804.09691.pdf,Surveillance Face Recognition Challenge,2018 +6,China,UCCS,uccs,39.9808333,116.34101249,Beihang University,edu,c50e498ede6f5216cffd0645e747ce67fae2096a,citation,https://arxiv.org/pdf/1811.09998.pdf,Low-Resolution Face Recognition in the Wild via Selective Knowledge Distillation,2018 +7,China,UCCS,uccs,39.97426,116.21589,"Institute of Information Engineering, CAS, Beijing, China",edu,c50e498ede6f5216cffd0645e747ce67fae2096a,citation,https://arxiv.org/pdf/1811.09998.pdf,Low-Resolution Face Recognition in the Wild via Selective Knowledge Distillation,2018 diff --git a/site/datasets/verified/ucf_selfie.csv b/site/datasets/verified/ucf_selfie.csv new file mode 100644 index 00000000..c32488ba --- /dev/null +++ b/site/datasets/verified/ucf_selfie.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,UCF Selfie,ucf_selfie,0.0,0.0,,,,main,,How to Take a Good Selfie?,2015 diff --git a/site/datasets/verified/ufdd.csv b/site/datasets/verified/ufdd.csv new file mode 100644 index 00000000..cec3e352 --- /dev/null +++ b/site/datasets/verified/ufdd.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,UFDD,ufdd,0.0,0.0,,,,main,,Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results,2018 diff --git a/site/datasets/verified/umd_faces.csv b/site/datasets/verified/umd_faces.csv new file mode 100644 index 00000000..03a3ed68 --- /dev/null +++ b/site/datasets/verified/umd_faces.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,UMD,umd_faces,0.0,0.0,,,,main,,UMDFaces: An annotated face dataset for training deep networks,2017 diff --git a/site/datasets/verified/urban_tribes.csv b/site/datasets/verified/urban_tribes.csv new file mode 100644 index 00000000..be8799f6 --- /dev/null +++ b/site/datasets/verified/urban_tribes.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,Urban Tribes,urban_tribes,0.0,0.0,,,,main,,From Bikers to Surfers: Visual Recognition of Urban Tribes,2013 diff --git a/site/datasets/verified/used.csv b/site/datasets/verified/used.csv new file mode 100644 index 00000000..52c7be2f --- /dev/null +++ b/site/datasets/verified/used.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,USED Social Event Dataset,used,0.0,0.0,,,,main,,USED: a large-scale social event detection dataset,2016 diff --git a/site/datasets/verified/vadana.csv b/site/datasets/verified/vadana.csv new file mode 100644 index 00000000..43b21fa4 --- /dev/null +++ b/site/datasets/verified/vadana.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,VADANA,vadana,0.0,0.0,,,,main,,VADANA: A dense dataset for facial image analysis,2011 diff --git a/site/datasets/verified/vgg_celebs_in_places.csv b/site/datasets/verified/vgg_celebs_in_places.csv new file mode 100644 index 00000000..41086905 --- /dev/null +++ b/site/datasets/verified/vgg_celebs_in_places.csv @@ -0,0 +1,2 @@ 
+id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,CIP,vgg_celebs_in_places,0.0,0.0,,,,main,,Faces in Places: compound query retrieval,2016 diff --git a/site/datasets/verified/vgg_faces.csv b/site/datasets/verified/vgg_faces.csv new file mode 100644 index 00000000..9d95ac17 --- /dev/null +++ b/site/datasets/verified/vgg_faces.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,VGG Face,vgg_faces,0.0,0.0,,,,main,,Deep Face Recognition,2015 diff --git a/site/datasets/verified/vgg_faces2.csv b/site/datasets/verified/vgg_faces2.csv new file mode 100644 index 00000000..689b801e --- /dev/null +++ b/site/datasets/verified/vgg_faces2.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,VGG Face2,vgg_faces2,0.0,0.0,,,,main,,VGGFace2: A Dataset for Recognising Faces across Pose and Age,2018 diff --git a/site/datasets/verified/viper.csv b/site/datasets/verified/viper.csv new file mode 100644 index 00000000..9885dfe5 --- /dev/null +++ b/site/datasets/verified/viper.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,VIPeR,viper,0.0,0.0,,,,main,,"Evaluating Appearance Models for Recognition, Reacquisition, and Tracking",2007 diff --git a/site/datasets/verified/vmu.csv b/site/datasets/verified/vmu.csv new file mode 100644 index 00000000..dd40d38b --- /dev/null +++ b/site/datasets/verified/vmu.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,VMU,vmu,0.0,0.0,,,,main,,Can facial cosmetics affect the matching accuracy of face recognition systems?,2012 diff --git a/site/datasets/verified/voc.csv b/site/datasets/verified/voc.csv new file mode 100644 index 00000000..89a14200 --- /dev/null +++ b/site/datasets/verified/voc.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,VOC,voc,0.0,0.0,,,,main,,The Pascal Visual Object Classes (VOC) Challenge,2009 diff --git a/site/datasets/verified/who_goes_there.csv b/site/datasets/verified/who_goes_there.csv new file mode 100644 index 00000000..8ff8ff9a --- /dev/null +++ b/site/datasets/verified/who_goes_there.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,WGT,who_goes_there,0.0,0.0,,,,main,,Who goes there?: approaches to mapping facial appearance diversity,2016 diff --git a/site/datasets/verified/wider.csv b/site/datasets/verified/wider.csv new file mode 100644 index 00000000..bfabc75b --- /dev/null +++ b/site/datasets/verified/wider.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,WIDER,wider,0.0,0.0,,,,main,,Recognize complex events from static images by fusing deep channels,2015 diff --git a/site/datasets/verified/wider_attribute.csv b/site/datasets/verified/wider_attribute.csv new file mode 100644 index 00000000..29165936 --- /dev/null +++ b/site/datasets/verified/wider_attribute.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,WIDER Attribute,wider_attribute,0.0,0.0,,,,main,,Human Attribute Recognition by Deep Hierarchical Contexts,2016 diff --git a/site/datasets/verified/wider_face.csv b/site/datasets/verified/wider_face.csv new file mode 100644 index 00000000..86c470e4 --- /dev/null +++ 
b/site/datasets/verified/wider_face.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,WIDER FACE,wider_face,0.0,0.0,,,,main,,WIDER FACE: A Face Detection Benchmark,2016 diff --git a/site/datasets/verified/wildtrack.csv b/site/datasets/verified/wildtrack.csv new file mode 100644 index 00000000..e6329a56 --- /dev/null +++ b/site/datasets/verified/wildtrack.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,WildTrack,wildtrack,0.0,0.0,,,,main,,WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection,2018 diff --git a/site/datasets/verified/yale_faces.csv b/site/datasets/verified/yale_faces.csv new file mode 100644 index 00000000..fd43e5cf --- /dev/null +++ b/site/datasets/verified/yale_faces.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,YaleFaces,yale_faces,0.0,0.0,,,,main,,From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose,2001 diff --git a/site/datasets/verified/yfcc_100m.csv b/site/datasets/verified/yfcc_100m.csv new file mode 100644 index 00000000..c7b3cd1f --- /dev/null +++ b/site/datasets/verified/yfcc_100m.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,YFCC100M,yfcc_100m,0.0,0.0,,,,main,,YFCC100M: the new data in multimedia research,2016 diff --git a/site/datasets/verified/youtube_celebrities.csv b/site/datasets/verified/youtube_celebrities.csv new file mode 100644 index 00000000..a3b08ee1 --- /dev/null +++ b/site/datasets/verified/youtube_celebrities.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,YouTube Celebrities,youtube_celebrities,0.0,0.0,,,,main,,Face tracking and recognition with visual constraints in real-world videos,2008 diff --git a/site/datasets/verified/youtube_faces.csv b/site/datasets/verified/youtube_faces.csv new file mode 100644 index 00000000..32356450 --- /dev/null +++ b/site/datasets/verified/youtube_faces.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,YouTubeFaces,youtube_faces,0.0,0.0,,,,main,,Face recognition in unconstrained videos with matched background similarity,2011 diff --git a/site/datasets/verified/youtube_makeup.csv b/site/datasets/verified/youtube_makeup.csv new file mode 100644 index 00000000..9ea99ac9 --- /dev/null +++ b/site/datasets/verified/youtube_makeup.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,YMU,youtube_makeup,0.0,0.0,,,,main,,Can facial cosmetics affect the matching accuracy of face recognition systems?,2012 diff --git a/site/datasets/verified/youtube_poses.csv b/site/datasets/verified/youtube_poses.csv new file mode 100644 index 00000000..5298f596 --- /dev/null +++ b/site/datasets/verified/youtube_poses.csv @@ -0,0 +1,2 @@ +id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year +0,,YouTube Pose,youtube_poses,0.0,0.0,,,,main,,Personalizing Human Video Pose Estimation,2016 diff --git a/site/includes/cite_our_work.html b/site/includes/cite_our_work.html new file mode 100644 index 00000000..810561e7 --- /dev/null +++ b/site/includes/cite_our_work.html @@ -0,0 +1,18 @@ +
+ +

Cite Our Work

+

+ + If you use our data, research, or graphics please cite our work: + +

+@online{megapixels,
+  author = {Harvey, Adam and LaPlace, Jules},
+  title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
+  year = 2019,
+  url = {https://megapixels.cc/},
+  urldate = {2019-04-20}
+}
+ +

+
diff --git a/site/public/about/faq/index.html b/site/public/about/faq/index.html deleted file mode 100644 index 168abd0b..00000000 --- a/site/public/about/faq/index.html +++ /dev/null @@ -1,59 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

FAQs

-
- -

[ page under development ]

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/about/index.html b/site/public/about/index.html deleted file mode 100644 index 64dded77..00000000 --- a/site/public/about/index.html +++ /dev/null @@ -1,93 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

About MegaPixels

-
- -

MegaPixels is an independent art and research project by Adam Harvey and Jules LaPlace investigating the ethics and individual privacy implications of publicly available face recognition datasets, and their role in industry and governmental expansion into biometric surveillance technologies.

-

The MegaPixels site is made possible with support from Mozilla

-
-
-

Adam Harvey

-

is a Berlin-based American artist and researcher. His previous projects (CV Dazzle, Stealth Wear, and SkyLift) explore the potential for counter-surveillance as artwork. He is the founder of VFRAME (visual forensics software for human rights groups) and is currently a researcher in residence at Karlsruhe HfG.

-

ahprojects.com

-

-
-
-

Jules LaPlace

-

is an American technologist and artist, also based in Berlin. He was previously the CTO of a digital agency in NYC and now works at VFRAME, developing computer vision and data analysis software for human rights groups. Jules also builds experimental software for artists and musicians. -

-

asdf.us

-
-

MegaPixels.cc is a research project about publicly available face recognition datasets. This website is based, in part, on earlier installations and research about facial recognition datasets. Since then it has evolved into a large-scale survey of publicly available face and person analysis datasets. Initially, this site was planned as a facial recognition tool to search the datasets. After building several prototypes using over 1 million face images from these datasets, it became clear that facial recognition was merely a face similarity search. The results were not accurate enough to align with the goals of this website: to promote responsible use of data and expose existing and past ethical breaches.

-

An academic report and presentation on the findings of this project are forthcoming. Throughout 2019, this site will be updated with more datasets and research reports on the general themes of remote biometric analysis and media collected "in the wild". The continued research on MegaPixels is supported by a 1-year Researcher-in-Residence grant from Karlsruhe HfG (2019-2020).

-

When possible, and once thoroughly verified, data generated for MegaPixels will be made available for download on github.com/adamhrv/megapixels

-

Team

-
    -
  • Adam Harvey: Concept, research and analysis, design, computer vision
  • -
  • Jules LaPlace: Information and systems architecture, data management, web applications
  • -
-

Contributing Researchers

-
    -
  • Berit Gilma: Dataset statistics
  • -
  • Beth (aka Ms. Celeb): Dataset usage verification
  • -
  • Mathana Stender: Commercial usage verification
  • -
-

Code and Libraries

-
    -
  • Semantic Scholar for citation aggregation
  • -
  • Leaflet.js for maps
  • -
  • C3.js for charts
  • -
  • ThreeJS for 3D visualizations
  • -
  • PDFMiner.Six and Pandas for research paper data analysis
  • -
-

Please direct questions, comments, or feedback to mastodon.social/@adamhrv

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/about/legal/index.html b/site/public/about/legal/index.html deleted file mode 100644 index a603f999..00000000 --- a/site/public/about/legal/index.html +++ /dev/null @@ -1,85 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

Legal

-
- -

MegaPixels.cc Terms and Privacy

-

MegaPixels is an independent and academic art and research project about the origins and ethics of publicly available face analysis image datasets. By accessing MegaPixels (the Service or Services), you agree to the terms and conditions set forth below.

-

Privacy

-

The MegaPixels site has been designed to minimize the number of network requests to 3rd party services and therefore prioritize the privacy of the viewer. This site does not use any local or external analytics programs to monitor site viewers. In fact, the only data collected are the server logs necessary for preventing misuse, which are deleted at short intervals.

-

3rd Party Services

-

In order to provide certain features of the site, some 3rd party services are needed. Currently, the MegaPixels.cc site uses two 3rd party services: (1) Leaflet.js for the interactive map and (2) Digital Ocean Spaces as a content delivery network. Both services encrypt your requests to their server using HTTPS and neither service requires storing any cookies or authentication. However, both services will store files in your web browser's local cache (local storage) to improve loading performance. None of these local storage files are used for analytics, tracking, or any similar purpose.

-

Links To Other Web Sites

-

The MegaPixels.cc site contains many links to 3rd party websites, especially in the list of citations provided for each dataset. This website has no control over, and assumes no responsibility for, the content, privacy policies, or practices of any third party web sites or services. You acknowledge and agree that megapixels.cc shall not be responsible or liable, directly or indirectly, for any damage or loss caused or alleged to be caused by or in connection with use of or reliance on any such content, goods, or services available on or through any such web sites or services.

-

We advise you to read the terms and conditions and privacy policies of any third-party web sites or services that you visit.

-

The Information We Provide

-

While every effort is made to publish only verifiable information, at times existing information may be revised or deleted, and new information may be added for clarity or correction. In no event will the operators of this site be liable for your use or misuse of the information provided.

-

We may terminate or suspend access to our Service immediately without prior notice or liability, for any reason whatsoever, including without limitation if you breach the Terms.

-

All provisions of the Terms which by their nature should survive termination shall survive termination, including, without limitation, ownership provisions, warranty disclaimers, indemnity and limitations of liability.

-

Prohibited Uses

-

You may not access or use, or attempt to access or use, the Services to take any action that could harm us or a third party. You may not use the Services in violation of applicable laws or in violation of our or any third party’s intellectual property or other proprietary or legal rights. You further agree that you shall not attempt (or encourage or support anyone else's attempt) to circumvent, reverse engineer, decrypt, or otherwise alter or interfere with the Services, or any content thereof, or make any unauthorized use thereof.

-

Without prior written consent, you shall not:

-

(i) access any part of the Services, Content, data or information you do not have permission or authorization to access;

-

(ii) use robots, spiders, scripts, service, software or any manual or automatic device, tool, or process designed to data mine or scrape the Content, data or information from the Services, or otherwise access or collect the Content, data or information from the Services using automated means;

-

(iii) use services, software or any manual or automatic device, tool, or process designed to circumvent any restriction, condition, or technological measure that controls access to the Services in any way, including overriding any security feature or bypassing or circumventing any access controls or use limits of the Services;

-

(iv) cache or archive the Content (except for a public search engine’s use of spiders for creating search indices);

-

(v) take action that imposes an unreasonable or disproportionately large load on our network or infrastructure; and

-

(vi) do anything that could disable, damage or change the functioning or appearance of the Services, including the presentation of advertising.

-

Engaging in a prohibited use of the Services may result in civil, criminal, and/or administrative penalties, fines, or sanctions against the user and those assisting the user.

-

Governing Law

-

These Terms shall be governed and construed in accordance with the laws of Berlin, Germany, without regard to its conflict of law provisions.

-

Our failure to enforce any right or provision of these Terms will not be considered a waiver of those rights. If any provision of these Terms is held to be invalid or unenforceable by a court, the remaining provisions of these Terms will remain in effect. These Terms constitute the entire agreement between us regarding our Service, and supersede and replace any prior agreements we might have between us regarding the Service.

-

Indemnity

-

You hereby indemnify, defend and hold harmless MegaPixels (and its creators) and all officers, directors, owners, agents, information providers, affiliates, licensors and licensees (collectively, the "Indemnified Parties") from and against any and all liability and costs, including, without limitation, reasonable attorneys' fees, incurred by the Indemnified Parties in connection with any claim arising out of any breach by you or any user of your account of these Terms of Service or the foregoing representations, warranties and covenants. You shall cooperate as fully as reasonably required in the defense of any such claim. We reserve the right, at our own expense, to assume the exclusive defense and control of any matter subject to indemnification by you.

-

Changes

-

We reserve the right, at our sole discretion, to modify or replace these Terms at any time. By continuing to use or access our Service after revisions become effective, you agree to be bound by the revised terms. If you do not agree to the revised terms, please do not use the Service.

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/about/press/index.html b/site/public/about/press/index.html deleted file mode 100644 index 3e5a5f8e..00000000 --- a/site/public/about/press/index.html +++ /dev/null @@ -1,58 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

Press

-
- -
-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/50_people_one_question/index.html b/site/public/datasets/50_people_one_question/index.html deleted file mode 100644 index b27fa3e5..00000000 --- a/site/public/datasets/50_people_one_question/index.html +++ /dev/null @@ -1,114 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
50 People One Question Dataset
-
- -
-
- -
50 People One Question is a dataset of people from an online video series on YouTube and Vimeo used for building facial recognition algorithms
The 50 People One Question dataset includes ... -

50 People 1 Question

-

[ page under development ]

-
-

Who used 50 People One Question Dataset?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how the 50 People One Question Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing 50 People One Question was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -
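As a rough illustration of the collection step described above, the sketch below pages through the citing papers for a single paper ID on Semantic Scholar. It assumes the current public Graph API endpoint, and the paper hash is a stand-in taken from the research notes elsewhere on this site; this is not the exact pipeline used for these pages.

```python
import requests

# Stand-in Semantic Scholar paper hash; any paper SHA works here.
PAPER_ID = "10f17534dba06af1ddab96c4188a9c98a020a459"
API = "https://api.semanticscholar.org/graph/v1/paper/{}/citations"

def fetch_citing_papers(paper_id):
    """Page through all papers that cite `paper_id`."""
    papers, offset = [], 0
    while True:
        resp = requests.get(API.format(paper_id),
                            params={"fields": "title,year,venue",
                                    "offset": offset, "limit": 100})
        resp.raise_for_status()
        page = resp.json()
        papers.extend(item["citingPaper"] for item in page.get("data", []))
        if "next" not in page:  # last page reached
            break
        offset = page["next"]
    return papers

if __name__ == "__main__":
    for paper in fetch_citing_papers(PAPER_ID)[:5]:
        print(paper.get("year"), paper.get("title"))
```

Geocoding and manual verification of each citing institution would follow as separate steps.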

- -
-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/afad/index.html b/site/public/datasets/afad/index.html deleted file mode 100644 index 67a4e981..00000000 --- a/site/public/datasets/afad/index.html +++ /dev/null @@ -1,127 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
Asian Face Age Dataset
-
- -
-
- -

Asian Face Age Dataset

-

[ page under development ]

-
-

Who used Asian Face Age Dataset?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how the Asian Face Age Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Asian Face Age Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-

(ignore) research notes

-

The Asian Face Age Dataset (AFAD) is a new dataset proposed for evaluating the performance of age estimation, which contains more than 160K facial images and the corresponding age and gender labels. This dataset is oriented to age estimation on Asian faces, so all the facial images are of Asian faces. It is noted that the AFAD is the biggest dataset for age estimation to date. It is well suited to evaluate how deep learning methods can be adopted for age estimation.
Motivation

-

For age estimation, there are several public datasets for evaluating the performance of a specific algorithm, such as FG-NET [1] (1,002 face images), MORPH I (1,690 face images), and MORPH II [2] (55,608 face images). Among them, MORPH II is the biggest public dataset to date. On the other hand, it is necessary to collect a large-scale dataset to train a deep Convolutional Neural Network. Therefore, the MORPH II dataset is extensively used to evaluate how deep learning methods can be adopted for age estimation [3][4].

-

However, the ethnic composition of the MORPH II dataset is very unbalanced, i.e., less than 1% of its faces are Asian. In order to evaluate previous methods for age estimation on Asian faces, the Asian Face Age Dataset (AFAD) was proposed.

-

There are 164,432 well-labeled photos in the AFAD dataset. It consists of 63,680 photos of females and 100,752 photos of males, with ages ranging from 15 to 40. The distribution of photo counts for distinct ages is illustrated in the figure above. Some samples are shown in the figure at the top. Its download link is provided in the "Download" section.

-

In addition, we also provide a subset of the AFAD dataset, called AFAD-Lite, which contains only PLACEHOLDER well-labeled photos. It consists of PLACEHOLDER photos of females and PLACEHOLDER photos of males, with ages ranging from 15 to 40. The distribution of photo counts for distinct ages is illustrated in Fig. PLACEHOLDER. Its download link is also provided in the "Download" section.

-

The AFAD dataset was built by collecting selfie photos from a particular social network -- the RenRen Social Network (RSN) [5]. The RSN is widely used by Asian students, including middle school, high school, undergraduate, and graduate students. Even after leaving school, some people still access their RSN accounts to connect with old classmates. So the ages of RSN users cover a wide range, from 15 to more than 40 years old.

-

Please note that this dataset is made available for academic research purposes only.

-
-

https://afad-dataset.github.io/

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/aflw/index.html b/site/public/datasets/aflw/index.html deleted file mode 100644 index 81fb7335..00000000 --- a/site/public/datasets/aflw/index.html +++ /dev/null @@ -1,53 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

Annotated Facial Landmarks in The Wild

-
Years
1993-1996
Images
25,993
Identities
1,199
Origin
Flickr

RESEARCH below this line

-

The motivation for the AFLW database is the need for a large-scale, multi-view, real-world face database with annotated facial features. We gathered the images on Flickr using a wide range of face relevant tags (e.g., face, mugshot, profile face). The downloaded set of images was manually scanned for images containing faces. The key data and most important properties of the database are:

-
-

https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/brainwash/index.html b/site/public/datasets/brainwash/index.html deleted file mode 100644 index 10ee577c..00000000 --- a/site/public/datasets/brainwash/index.html +++ /dev/null @@ -1,146 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
Brainwash Dataset
-
- -
-
- -
Brainwash is a dataset of webcam images taken from the Brainwash Cafe in San Francisco in 2014
The Brainwash dataset includes 11,918 images of "everyday life of a busy downtown cafe" and is used for training head detection surveillance algorithms -

Brainwash Dataset

-

Brainwash is a head detection dataset created from San Francisco's Brainwash Cafe livecam footage. It includes 11,918 images of "everyday life of a busy downtown cafe" 1 captured at 100-second intervals throughout the day. The Brainwash dataset was captured during 3 days in 2014: October 27, November 13, and November 24. According to the authors' research paper introducing the dataset, the images were acquired with the help of Angelcam.com. 2

-

Brainwash is not a widely used dataset, but since its publication by Stanford University in 2015 it has notably appeared in several research papers from the National University of Defense Technology in Changsha, China. In 2016 and 2017, researchers there conducted studies on detecting people's heads in crowded scenes for the purpose of surveillance. 3 4

-

If you happen to have been at the Brainwash Cafe in San Francisco at any time on October 27, November 13, or November 24 in 2014, you are most likely included in the Brainwash dataset and have unwittingly contributed to surveillance research.

-
-

Who used Brainwash Dataset?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how the Brainwash Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Brainwash Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-
- -
-
-
-
- -

Supplementary Information

- -
A visualization of 81,973 head annotations from the Brainwash dataset training partition. © megapixels.cc
A sample image from the Brainwash dataset used for training face and head detection algorithms for surveillance. The dataset contains about 12,000 images. License: Open Data Commons Public Domain Dedication (PDDL)
49 of the 11,918 images included in the Brainwash dataset. License: Open Data Commons Public Domain Dedication (PDDL)

TODO

-
    -
  • change supp images to 2x2 grid with bboxes
  • -
  • add bounding boxes to the header image
  • -
  • remake montage with randomized images, with bboxes
  • -
  • add ethics link to Stanford
  • -
  • add optout info
  • -
-
  • 1. "readme.txt". https://exhibits.stanford.edu/data/catalog/sx925dc9385.
  • 2. Stewart, Russell and Andriluka, Mykhaylo. "End-to-end people detection in crowded scenes". 2016.
  • 3. Li, Y., Dou, Y., Liu, X. and Li, T. "Localized Region Context and Object Feature Fusion for People Head Detection". ICIP16 Proceedings. 2016. Pages 594-598.
  • 4. Zhao, X., Wang, Y. and Dou, Y. "A Replacement Algorithm of Non-Maximum Suppression Based on Graph Clustering".
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/caltech_10k/index.html b/site/public/datasets/caltech_10k/index.html deleted file mode 100644 index 10925b09..00000000 --- a/site/public/datasets/caltech_10k/index.html +++ /dev/null @@ -1,124 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
Caltech 10K Faces Dataset
-
- -
-
- -

Caltech 10K Faces Dataset

-

[ page under development ]

-
-

Who used Caltech 10K Faces Dataset?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how the Caltech 10K Faces Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Caltech 10K Faces Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-

(ignore) research notes

-

The dataset contains images of people collected from the web by typing common given names into Google Image Search. The coordinates of the eyes, the nose and the center of the mouth for each frontal face are provided in a ground truth file. This information can be used to align and crop the human faces or as a ground truth for a face detection algorithm. The dataset has 10,524 human faces of various resolutions and in different settings, e.g. portrait images, groups of people, etc. Profile faces or very low resolution faces are not labeled.

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/celeba/index.html b/site/public/datasets/celeba/index.html deleted file mode 100644 index 3b9883dc..00000000 --- a/site/public/datasets/celeba/index.html +++ /dev/null @@ -1,126 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
CelebA Dataset
-
- -
-
- -
CelebA is a dataset of people...
CelebA includes... -

CelebA Dataset

-

[ PAGE UNDER DEVELOPMENT ]

-
-

Who used CelebA Dataset?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how the CelebA Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Large-scale CelebFaces Attributes Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-

Research

-
    -
  • "An Unsupervised Approach to Solving Inverse Problems using Generative Adversarial Networks" mentions use by sponsored by an agency of the United States government. Neither the United States government nor Lawrence Livermore National Security, LLC, nor any of their"
  • -
  • 7dab6fbf42f82f0f5730fc902f72c3fb628ef2f0
  • -
  • the NNSA (National Nuclear Security Administration), whose principal responsibility is ensuring the safety, security and reliability of the nation's nuclear weapons
  • -
-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/cofw/index.html b/site/public/datasets/cofw/index.html deleted file mode 100644 index f335442c..00000000 --- a/site/public/datasets/cofw/index.html +++ /dev/null @@ -1,179 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
COFW Dataset
-
- -
-
- -

Caltech Occluded Faces in the Wild

-

[ PAGE UNDER DEVELOPMENT ]

-
-

Who used COFW Dataset?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how the COFW Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Caltech Occluded Faces in the Wild was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-

(ignore) research notes

-
Years
1993-1996
Images
14,126
Identities
1,199
Origin
Web Searches
Funded by
ODNI, IARPA, Microsoft

COFW is "is designed to benchmark face landmark algorithms in realistic conditions, which include heavy occlusions and large shape variations" [Robust face landmark estimation under occlusion].

-

We asked four people with different levels of computer vision knowledge to each collect 250 faces representative of typical real-world images, with the clear goal of challenging computer vision methods. The result is 1,007 images of faces obtained from a variety of sources.

-
-

Robust face landmark estimation under occlusion

-

Our face dataset is designed to present faces in real-world conditions. Faces show large variations in shape and occlusions due to differences in pose, expression, use of accessories such as sunglasses and hats and interactions with objects (e.g. food, hands, microphones, etc.). All images were hand annotated in our lab using the same 29 landmarks as in LFPW. We annotated both the landmark positions as well as their occluded/unoccluded state. The faces are occluded to different degrees, with large variations in the type of occlusions encountered. COFW has an average occlusion of over 23%. To increase the number of training images, and since COFW has the exact same landmarks as LFPW, for training we use the original non-augmented 845 LFPW faces + 500 COFW faces (1,345 total), and for testing the remaining 507 COFW faces. To make sure all images had occlusion labels, we annotated occlusion on the available 845 LFPW training images, finding an average of only 2% occlusion.

-
-

http://www.vision.caltech.edu/xpburgos/ICCV13/

-

This research is supported by NSF Grant 0954083 and by the Office of the Director of National Intelligence (ODNI), Intelligence Advanced Research Projects Activity (IARPA), via IARPA R&D Contract No. 2014-14071600012.

-
-

https://www.cs.cmu.edu/~peiyunh/topdown/

-
- -


TODO

-

- replace graphic

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/duke_mtmc/index.html b/site/public/datasets/duke_mtmc/index.html deleted file mode 100644 index 0d082c15..00000000 --- a/site/public/datasets/duke_mtmc/index.html +++ /dev/null @@ -1,144 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
Duke MTMC Dataset
-
- -
-
- -
Duke MTMC is a dataset of surveillance camera footage of students on Duke University campus
Duke MTMC contains over 2 million video frames and 2,700 unique identities collected from 8 HD cameras at Duke University campus in March 2014 -

Duke MTMC

-

The Duke Multi-Target, Multi-Camera Tracking Dataset (MTMC) is a dataset of video recorded on Duke University campus for research and development of networked camera surveillance systems. MTMC tracking is used for citywide dragnet surveillance systems such as those used throughout China by SenseTime 1 and the oppressive monitoring of 2.5 million Uyghurs in Xinjiang by SenseNets 2. In fact, researchers from both SenseTime 4 5 and SenseNets 3 used the Duke MTMC dataset for their research.

-

The Duke MTMC dataset is unique because it is the largest publicly available MTMC and person re-identification dataset and has the longest duration of annotated video. In total, the Duke MTMC dataset provides over 14 hours of 1080p video from 8 synchronized surveillance cameras. 6 It is among the most widely used person re-identification datasets in the world. The approximately 2,700 unique people in the Duke MTMC videos, most of whom are students, are used for research and development of surveillance technologies by commercial, academic, and even defense organizations.

-
A collection of 1,600 out of the 2,700 students and passersby captured into the Duke MTMC surveillance research dataset. These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification. © megapixels.cc

The creation and publication of the Duke MTMC dataset in 2016 was originally funded by the U.S. Army Research Laboratory and the National Science Foundation 6. Since 2016, use of the Duke MTMC dataset images has been publicly acknowledged in research funded by or on behalf of the Chinese National University of Defense Technology 7 8, IARPA and IBM 9, and the U.S. Department of Homeland Security 10.

-

The 8 cameras deployed on Duke's campus were specifically set up to capture students "during periods between lectures, when pedestrian traffic is heavy". 6 Cameras 7 and 2 capture large groups of prospective students and children. Camera 5 was positioned to capture students as they enter and exit Duke University's main chapel. Each camera's location is documented below.

-
Duke MTMC camera locations on Duke University campus © megapixels.cc
Duke MTMC camera views for 8 cameras deployed on campus © megapixels.cc
Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus © megapixels.cc
-

Who used Duke MTMC Dataset?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how the Duke MTMC Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Duke Multi-Target, Multi-Camera Tracking Project was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-
- -
-
-
-
- -

Supplementary Information

- -

Notes

-

The Duke MTMC dataset paper mentions 2,700 identities, but its ground truth file only lists annotations for 1,812.
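A hedged way to check this kind of discrepancy, assuming the ground truth has been exported to CSV with one row per annotation and an `id` column (the actual release ships MATLAB .mat files, so the filename and column name here are illustrative):

```python
import csv

def count_identities(csv_path, id_column="id"):
    """Count distinct identity labels in an exported ground-truth CSV."""
    with open(csv_path, newline="") as f:
        return len({row[id_column] for row in csv.DictReader(f)})

print(count_identities("duke_mtmc_groundtruth.csv"))  # hypothetical export
```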

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/facebook/index.html b/site/public/datasets/facebook/index.html deleted file mode 100644 index be413510..00000000 --- a/site/public/datasets/facebook/index.html +++ /dev/null @@ -1,54 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -
TBD
TBD -
TBD

Statistics

-
Years
2002-2004
Images
13,233
Identities
5,749
Origin
Yahoo News Images
Funding
(Possibly, partially CIA)

Ignore content below these lines

- -
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/feret/index.html b/site/public/datasets/feret/index.html deleted file mode 100644 index 5cd29c4c..00000000 --- a/site/public/datasets/feret/index.html +++ /dev/null @@ -1,87 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
FERET
-
- -
-
- -

Funding

-

The FERET program is sponsored by the U.S. Department of Defense’s Counterdrug Technology Development Program Office. The U.S. Army Research Laboratory (ARL) is the technical agent for the FERET program. ARL designed, administered, and scored the FERET tests. George Mason University collected, processed, and maintained the FERET database. Inquiries regarding the FERET database or test should be directed to P. Jonathon Phillips.

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/hrt_transgender/index.html b/site/public/datasets/hrt_transgender/index.html deleted file mode 100644 index 099dea4e..00000000 --- a/site/public/datasets/hrt_transgender/index.html +++ /dev/null @@ -1,67 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
HRT Transgender
-
- -
-
- -
TBD
TBD -

HRT Transgender Dataset

-

[ page under development ]

-

{% include 'dashboard.html' %}

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/index.html b/site/public/datasets/index.html deleted file mode 100644 index 6f87ff68..00000000 --- a/site/public/datasets/index.html +++ /dev/null @@ -1,145 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
- - - - - - \ No newline at end of file diff --git a/site/public/datasets/lfpw/index.html b/site/public/datasets/lfpw/index.html deleted file mode 100644 index 005b7aaa..00000000 --- a/site/public/datasets/lfpw/index.html +++ /dev/null @@ -1,116 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
LFPW
-
- -
-
- -

Labeled Face Parts in The Wild

-
-

Who used LFPW?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how LFPW has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Labeled Face Parts in the Wild was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-

RESEARCH below this line

-

Release 1 of LFPW consists of 1,432 faces from images downloaded from the web using simple text queries on sites such as google.com, flickr.com, and yahoo.com. Each image was labeled by three MTurk workers, and 29 fiducial points, shown below, are included in the dataset. LFPW was originally described in the following publication:

-

Due to copyright issues, we cannot distribute image files in any format to anyone. Instead, we have made available a list of image URLs where you can download the images yourself. We realize that this makes it impossible to exactly compare numbers, as image links will slowly disappear over time, but we have no other option. This seems to be the way other large web-based databases are evolving.

-
-

https://neerajkumar.org/databases/lfpw/

-

This research was performed at Kriegman-Belhumeur Vision Technologies and was funded by the CIA through the Office of the Chief Scientist. https://www.cs.cmu.edu/~peiyunh/topdown/ (nk_cvpr2011_faceparts.pdf)

-
-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html deleted file mode 100644 index cb487913..00000000 --- a/site/public/datasets/lfw/index.html +++ /dev/null @@ -1,166 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
LFW
-
- -
-
- -
Labeled Faces in The Wild (LFW) is the first facial recognition dataset created entirely from online photos
It includes 13,233 images of 5,749 people copied from the Internet during 2002-2004 and is the most frequently used dataset in the world for benchmarking face recognition algorithms. -

Labeled Faces in the Wild

-

[ PAGE UNDER DEVELOPMENT ]

-

Labeled Faces in The Wild (LFW) is "a database of face photographs designed for studying the problem of unconstrained face recognition 1. It is used to evaluate and improve the performance of facial recognition algorithms in academic, commercial, and government research. According to BiometricUpdate.com 3, LFW is "the most widely used evaluation set in the field of facial recognition, LFW attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."

-

The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. LFW is a subset of Names and Faces and is part of the first facial recognition training dataset created entirely from images appearing on the Internet. The people appearing in LFW are...

-

The Names and Faces dataset was the first face recognition dataset created entirely from online photos. However, Names and Faces and LFW are not the first face recognition datasets created entirely "in the wild". That title belongs to the UCD dataset. Images obtained "in the wild" means images used without explicit consent or awareness from the subject or photographer.

-

All 5,749 people in the Labeled Faces in The Wild Dataset. Showing one face per person


-

Who used LFW?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how LFW has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Labeled Faces in the Wild was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-
- -
-
-
-
- -

Supplementary Information

- -

Commercial Use

-

Add a paragraph about how usage extends far beyond academia into research centers for largest companies in the world. And even funnels into CIA funded research in the US and defense industry usage in China.

-

Research

-
    -
  • "In our experiments, we used 10000 images and associated captions from the Faces in the wilddata set [3]."
  • -
  • "This work was supported in part by the Center for Intelligent Information Retrieval, the Central Intelligence Agency, the National Security Agency and National Science Foundation under CAREER award IIS-0546666 and grant IIS-0326249."
  • -
  • From: "People-LDA: Anchoring Topics to People using Face Recognition" https://www.semanticscholar.org/paper/People-LDA%3A-Anchoring-Topics-to-People-using-Face-Jain-Learned-Miller/10f17534dba06af1ddab96c4188a9c98a020a459 and https://ieeexplore.ieee.org/document/4409055
  • -
  • This paper was presented at IEEE 11th ICCV conference Oct 14-21 and the main LFW paper "Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments" was also published that same year
  • -
  • 10f17534dba06af1ddab96c4188a9c98a020a459
  • -
  • This research is based upon work supported in part by the Office of the Director of National Intelligence (ODNI), Intelligence Advanced Research Projects Activity (IARPA), via contract number 2014-14071600010.
  • -
  • From "Labeled Faces in the Wild: Updates and New Reporting Procedures"
  • -
  • 70% of people in the dataset have only 1 image and 29% have 2 or more images
  • -
  • The LFW dataset is considered the "most popular benchmark for face recognition" 2
  • -
  • The LFW dataset is "the most widely used evaluation set in the field of facial recognition" 3
  • -
  • All images in the LFW dataset were obtained "in the wild", meaning without any consent from the subject or from the photographer
  • -
  • The faces in the LFW dataset were detected using the Viola-Jones haarcascade face detector [^lfw_website] [^lfw-survey] (see the detection sketch after this list)
  • -
  • The LFW dataset is used by several of the largest tech companies in the world including "Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong." 3
  • -
  • All images in the LFW dataset were copied from Yahoo News between 2002 - 2004
  • -
  • In 2014, two of the four original authors of the LFW dataset received funding from IARPA and ODNI for their followup paper Labeled Faces in the Wild: Updates and New Reporting Procedures via IARPA contract number 2014-14071600010
  • -
  • The dataset includes 2 images of George Tenet, the former Director of Central Intelligence (DCI) for the Central Intelligence Agency whose facial biometrics were eventually used to help train facial recognition software in China and Russia
  • -
  • ./15/155205b8e288fd49bf203135871d66de879c8c04/paper.txt shows usage by DSTO Australia, supported parimal@iisc.ac.in
  • -
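As a rough sketch of what Viola-Jones haarcascade detection looks like in practice, the snippet below uses OpenCV's stock frontal-face cascade. The cascade file and input filename are illustrative assumptions; the exact detector configuration the LFW authors used is not documented here.

```python
import cv2

# OpenCV ships pre-trained Haar cascades; this loads the default frontal-face one.
cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

img = cv2.imread("news_photo.jpg")            # hypothetical input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # the detector runs on grayscale
faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)

# Draw a box around each detected face region.
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite("news_photo_faces.jpg", img)
```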
-
Created
2002 – 2004
Images
13,233
Identities
5,749
Origin
Yahoo! News Images
Used by
Facebook, Google, Microsoft, Baidu, Tencent, SenseTime, Face++, CIA, NSA, IARPA
Website
    -
  • There are about 3 men for every 1 woman in the LFW dataset 1
  • -
  • The person with the most images is George W. Bush with 530
  • -
  • There are about 3 George W. Bush's for every 1 Tony Blair
  • -
  • The LFW dataset includes over 500 actors, 30 models, 10 presidents, 124 basketball players, 24 football players, 11 kings, 7 queens, and 1 Moby
  • -
  • In all 3 of the LFW publications [^lfw_original_paper], [^lfw_survey], [^lfw_tech_report] the words "ethics", "consent", and "privacy" appear 0 times
  • -
  • The word "future" appears 71 times
  • -
  • * denotes partial funding for related research
  • -
-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/market_1501/index.html b/site/public/datasets/market_1501/index.html deleted file mode 100644 index 059b1a49..00000000 --- a/site/public/datasets/market_1501/index.html +++ /dev/null @@ -1,132 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
Market 1501
-
- -
-
- -
Market-1501 is a dataset of CCTV footage collected at Tsinghua University
The Market-1501 dataset includes 1,501 identities captured by 6 surveillance cameras (five HD and one SD) on campus -

Market-1501 Dataset

-

[ PAGE UNDER DEVELOPMENT]

-
-

Who used Market 1501?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how Market-1501 has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Market-1501 Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-

(ignore) research Notes

-
    -
  • "MARS is an extension of the Market-1501 dataset. During collection, we placed six near synchronized cameras in the campus of Tsinghua university. There were Five 1,0801920 HD cameras and one 640480 SD camera. MARS consists of 1,261 different pedestrians whom are captured by at least 2 cameras. Given a query tracklet, MARS aims to retrieve tracklets that contain the same ID." - main paper
  • -
  • bbox "0065C1T0002F0016.jpg", "0065" is the ID of the pedestrian. "C1" denotes the first -camera (there are totally 6 cameras). "T0002" means the 2th tracklet. "F016" is the 16th frame -within this tracklet. For the tracklets, their names are accumulated for each ID; but for frames, -they start from "F001" in each tracklet.
  • -
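A minimal sketch of parsing this naming scheme, assuming fixed-width ID and tracklet fields as in the example above (the regex is an illustration, not an official parser shipped with the dataset):

```python
import re

# "0065C1T0002F0016.jpg" -> pedestrian 65, camera 1, tracklet 2, frame 16
PATTERN = re.compile(
    r"^(?P<pid>\d{4})C(?P<cam>\d)T(?P<tracklet>\d{4})F(?P<frame>\d+)\.jpg$")

def parse_mars_name(filename):
    """Split a MARS bounding-box filename into its numeric fields."""
    m = PATTERN.match(filename)
    if m is None:
        raise ValueError(f"unrecognized MARS filename: {filename}")
    return {key: int(value) for key, value in m.groupdict().items()}

print(parse_mars_name("0065C1T0002F0016.jpg"))
# {'pid': 65, 'cam': 1, 'tracklet': 2, 'frame': 16}
```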
-

@proceedings{zheng2016mars,
  title={MARS: A Video Benchmark for Large-Scale Person Re-identification},
  author={Zheng, Liang and Bie, Zhi and Sun, Yifan and Wang, Jingdong and Su, Chi and Wang, Shengjin and Tian, Qi},
  booktitle={European Conference on Computer Vision},
  year={2016},
  organization={Springer}
}

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/msceleb/index.html b/site/public/datasets/msceleb/index.html deleted file mode 100644 index fd64189c..00000000 --- a/site/public/datasets/msceleb/index.html +++ /dev/null @@ -1,139 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
Microsoft Celeb
-
- -
-
- -
MS Celeb is a dataset of web images used for training and evaluating face recognition algorithms
The MS Celeb dataset includes over 10,000,000 images and 93,000 identities of semi-public figures collected using the Bing search engine -

Microsoft Celeb Dataset (MS Celeb)

-

[ PAGE UNDER DEVELOPMENT ]

-
-

Who used Microsoft Celeb?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how Microsoft Celeb has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Microsoft Celebrity Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-
- -
-
-
-
- -

Supplementary Information

- -

Additional Information

- -
  • "readme.txt" https://exhibits.stanford.edu/data/catalog/sx925dc9385.

    -
  • Li, Y. and Dou, Y. and Liu, X. and Li, T. Localized Region Context and Object Feature Fusion for People Head Detection. ICIP16 Proceedings. 2016. Pages 594-598.

    -
  • Zhao. X, Wang Y, Dou, Y. A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering.

    -
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/oxford_town_centre/index.html b/site/public/datasets/oxford_town_centre/index.html deleted file mode 100644 index 5379682c..00000000 --- a/site/public/datasets/oxford_town_centre/index.html +++ /dev/null @@ -1,146 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
TownCentre
-
- -
-
- -
Oxford Town Centre is a dataset of surveillance camera footage from Cornmarket St, Oxford, England
The Oxford Town Centre dataset includes approximately 2,200 identities and is used for research and development of face recognition systems -

Oxford Town Centre

-

The Oxford Town Centre dataset is a CCTV video of pedestrians in a busy downtown area in Oxford used for research and development of activity and face recognition systems. 1 The CCTV video was obtained from a public surveillance camera at the corner of Cornmarket and Market St. in Oxford, England and includes approximately 2,200 people. Since its publication in 2009 2 the Oxford Town Centre dataset has been used in over 80 verified research projects including commercial research by Amazon, Disney, OSRAM, and Huawei; and academic research in China, Israel, Russia, Singapore, the US, and Germany among dozens more.

-

The Oxford Town Centre dataset is unique in that it uses footage from a public surveillance camera that would otherwise be designated for public safety. The video shows that the pedestrians act normally and unrehearsed, indicating they neither knew of nor consented to participation in the research project.

-
-

Who used TownCentre?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how the Oxford Town Centre dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Oxford Town Centre was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-
- -
-
-
-
- -

Supplementary Information

- -

Location

-

The street location of the camera used for the Oxford Town Centre dataset was confirmed by matching the road, benches, and store signs (source). At that location, two public CCTV cameras exist, mounted on the side of the Northgate House building at 13-20 Cornmarket St. Because of the directionality of the lower camera's mounting pole, a view from a private camera in the building across the street can be ruled out: it would have to show more of the silhouette of the lower camera's mounting pole. Two options remain: either the public CCTV camera mounted to the side of the building was used, or the researchers mounted their own camera to the side of the building in the same location. Because the researchers used many other existing public CCTV cameras for their research projects, it is likely that they would also be able to access this camera.

-

Although Google Street View images only show this public CCTV camera pointing the other way, at least one public photo shows the upper CCTV camera pointing in the same direction as in the Oxford Town Centre dataset, proving the camera can be and has been rotated.

-

As for the capture date, the text on the storefront display shows a sale happening from December 2nd – 7th, indicating the capture date was between or just before those dates. The capture year is either 2007 or 2008, since prior to 2007 the Carphone Warehouse (photo, history) did not exist at this location. Since the sweaters in the GAP window display are more similar to those in a GAP website snapshot from November 2007, our guess is that the footage was obtained during late November or early December 2007. The lack of street vendors and the slight waste residue near the bench suggest that it was probably a weekday after rubbish removal.

-
Footage from this public CCTV camera was used to create the Oxford Town Centre dataset. Image sources: Google Street View (map)
Heat map body visualization of the pedestrians detected in the Oxford Town Centre dataset © megapixels.cc
Heat map face visualization of the pedestrians detected in the Oxford Town Centre dataset © megapixels.cc

Demo Videos Using Oxford Town Centre Dataset

-

Several researchers have posted their demo videos using the Oxford Town Centre dataset on YouTube:

- -
  • 1. Benfold, Ben and Reid, Ian. "Stable Multi-Target Tracking in Real-Time Surveillance Video". CVPR 2011. Pages 3457-3464.
  • 2. "Guiding Visual Surveillance by Tracking Human Attention". 2009.
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/pipa/index.html b/site/public/datasets/pipa/index.html deleted file mode 100644 index 7a4fbc0e..00000000 --- a/site/public/datasets/pipa/index.html +++ /dev/null @@ -1,120 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
PIPA Dataset
-
- -
-
- -
People in Photo Albums (PIPA) is a dataset...
[ add subdescription ] -

People in Photo Albums

-

[ PAGE UNDER DEVELOPMENT ]

-
-

Who used PIPA Dataset?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how the PIPA Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the People in Photo Albums Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/pubfig/index.html b/site/public/datasets/pubfig/index.html deleted file mode 100644 index c46eeea3..00000000 --- a/site/public/datasets/pubfig/index.html +++ /dev/null @@ -1,117 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
PubFig
-
- -
-
- -
PubFig is a dataset...
[ add subdescription ] -

PubFig

-

[ PAGE UNDER DEVELOPMENT ]

-
-

Who used PubFig?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how PubFig has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Public Figures Face Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/uccs/index.html b/site/public/datasets/uccs/index.html deleted file mode 100644 index c9faac68..00000000 --- a/site/public/datasets/uccs/index.html +++ /dev/null @@ -1,255 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
UCCS
-
- -
-
- -
UnConstrained College Students is a dataset of long-range surveillance photos of students at University of Colorado in Colorado Springs
The UnConstrained College Students dataset includes 16,149 images and 1,732 identities of subjects on the University of Colorado Colorado Springs campus and is used for developing face recognition and face detection algorithms -

UnConstrained College Students

-

[ page under development ]

-

UnConstrained College Students (UCCS) is a dataset of long-range surveillance photos captured at the University of Colorado Colorado Springs. According to the authors of two papers associated with the dataset, subjects were "photographed using a long-range high-resolution surveillance camera without their knowledge" 2. To create the dataset, the researchers used a Canon 7D digital camera fitted with a Sigma 800mm telephoto lens and photographed students 150–200m away through their office window. Photos were taken during the morning and afternoon while students were walking to and from classes. The primary uses of this dataset are to train, validate, and build face recognition and face detection algorithms for realistic surveillance scenarios.
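For a sense of scale, a back-of-envelope calculation using published Canon 7D specs (an 18MP, 22.3mm-wide APS-C sensor at 5,184 pixels across; these figures are an assumption layered on the hardware named above, not values from the UCCS papers) suggests why the faces are unusually high resolution:

```python
# Thin-lens approximation of horizontal ground coverage and pixel density
# for a Canon 7D (22.3mm-wide sensor, 5184px across) with an 800mm lens.
SENSOR_WIDTH_MM = 22.3
IMAGE_WIDTH_PX = 5184
FOCAL_LENGTH_MM = 800

for distance_m in (150, 200):
    fov_m = distance_m * SENSOR_WIDTH_MM / FOCAL_LENGTH_MM  # frame width on the ground
    px_per_cm = IMAGE_WIDTH_PX / (fov_m * 100)
    print(f"{distance_m} m: ~{fov_m:.1f} m wide frame, ~{px_per_cm:.0f} px/cm")
```

At 150–200m the frame covers only about 4–6m, so a face roughly 15cm wide would span on the order of 150–200 pixels.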

-

What makes the UCCS dataset unique is that it includes the highest resolution images of any publicly available face recognition dataset discovered so far (18MP), that it was captured on a campus without consent or awareness using a long-range telephoto lens, and that it was funded by United States defense and intelligence agencies.

-

Combined funding sources for the creation of the initial and final release of the dataset include ODNI (Office of Director of National Intelligence), IARPA (Intelligence Advance Research Projects Activity), ONR MURI (Office of Naval Research and The Department of Defense Multidisciplinary University Research Initiative), Army SBIR (Small Business Innovation Research), SOCOM SBIR (Special Operations Command and Small Business Innovation Research), and the National Science Foundation. 1 2

-

In 2017 the UCCS face dataset was used for a defense and intelligence agency funded face recognition challenge at the International Joint Conference on Biometrics in Denver, CO, and in 2018 it was used for the 2nd Unconstrained Face Detection and Open Set Recognition Challenge at the European Conference on Computer Vision (ECCV) in Munich, Germany. Additional research projects that have used the UCCS dataset are included below in the list of verified citations.

-
-

Who used UCCS?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how UCCS has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the UnConstrained College Students Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-
- -
-
-
-
- -

Supplementary Information

- -

Dates and Times

-

The images in UCCS were taken on 18 non-consecutive days during 2012–2013. Analysis of the EXIF data embedded in the original images reveals that most of the images were taken on Tuesdays, and the most frequent capture time throughout the week was 12:30PM.

-
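The weekday tally described above can be reproduced with a few lines of Python. This is a minimal sketch, not the analysis code used for these charts: it assumes the original JPEGs sit in a hypothetical uccs_images/ folder, that Pillow is installed, and that the files still carry their EXIF DateTimeOriginal timestamps.
<pre>
from collections import Counter
from datetime import datetime
from pathlib import Path

from PIL import Image  # pip install Pillow

DATETIME_ORIGINAL = 36867  # standard EXIF tag ID for DateTimeOriginal

weekdays = Counter()
for path in Path("uccs_images").glob("*.jpg"):   # hypothetical folder
    exif = Image.open(path)._getexif() or {}     # flat EXIF dict, or None
    stamp = exif.get(DATETIME_ORIGINAL)          # e.g. "2013:01:28 12:34:56"
    if stamp:
        taken = datetime.strptime(stamp, "%Y:%m:%d %H:%M:%S")
        weekdays[taken.strftime("%A")] += 1      # Monday, Tuesday, ...

for day, count in weekdays.most_common():
    print(day, count)
</pre>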
 UCCS photos captured per weekday © megapixels.cc
UCCS photos captured per weekday © megapixels.cc
 UCCS photos captured per 10-minute intervals per weekday © megapixels.cc
UCCS photos captured per 10-minute intervals per weekday © megapixels.cc

UCCS photos taken in 2012

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
DatePhotos
Feb 23, 2012132
March 6, 2012288
March 8, 2012506
March 13, 2012160
March 20, 20121,840
March 22, 2012445
April 3, 20121,639
April 12, 201214
April 17, 201219
April 24, 201263
April 25, 201211
April 26, 201220
-

UCCS photos taken in 2013

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
DatePhotos
Jan 28, 20131,056
Jan 29, 20131,561
Feb 13, 2013739
Feb 19, 2013723
Feb 20, 2013965
Feb 26, 2013736
-

Location

-

The location of the camera and subjects can be confirmed using several visual cues in the dataset images: the unique pattern of the sidewalk that is only used on the UCCS Pedestrian Spine near the West Lawn, the two UCCS sign poles with matching graphics still visible in Google Street View, the no parking sign and the directionality of its arrow, the back of the street sign next to it, the slight bend in the sidewalk, the cars passing in the background of the image, and the far wall of the parking garage all match images in the dataset. The original papers also provide another clue: a picture of the camera inside the office that was used to create the dataset. The window view in this image provides another match for the brick pattern on the north facade of the Kraemer Family Library and the green metal fence along the sidewalk. View the location on Google Maps

-
 Location on campus where students were unknowingly photographed with a telephoto lens to be used for defense and intelligence agency funded research on face recognition. Image: Google Maps
Location on campus where students were unknowingly photographed with a telephoto lens to be used for defense and intelligence agency funded research on face recognition. Image: Google Maps
 3D view showing the angle of view of the surveillance camera used for UCCS dataset. Image: Google Maps
3D view showing the angle of view of the surveillance camera used for UCCS dataset. Image: Google Maps

Funding

-

The UnConstrained College Students dataset is associated with two main research papers: "Large Scale Unconstrained Open Set Face Database" and "Unconstrained Face Detection and Open-Set Face Recognition Challenge". Collectively, these papers and the creation of the dataset have received funding from the following organizations:

-
    -
  • ONR (Office of Naval Research) MURI (The Department of Defense Multidisciplinary University Research Initiative) grant N00014-08-1-0638
  • -
  • Army SBIR (Small Business Innovation Research) grant W15P7T-12-C-A210
  • -
  • SOCOM (Special Operations Command) SBIR (Small Business Innovation Research) grant H92222-07-P-0020
  • -
  • National Science Foundation Grant IIS-1320956
  • -
  • ODNI (Office of the Director of National Intelligence)
  • -
  • IARPA (Intelligence Advanced Research Projects Activity) R&D contract 2014-14071600012
  • -
-

Opting Out

-

If you attended the University of Colorado Colorado Springs and were captured by the long-range surveillance camera used to create this dataset, there is unfortunately currently no way to be removed. The authors do not provide any options for students to opt out, nor were students informed that they would be used for training face recognition. According to the authors, the lack of any consent or knowledge of participation is part of what provides the value of the UnConstrained College Students dataset.

-

Ethics

-

Please direct any questions about the ethics of the dataset to the University of Colorado Colorado Springs Ethics and Compliance Office

-

Technical Details

-

For further technical information about the dataset, visit the UCCS dataset project page.

-

Under Development

-
    -
  • adding more verified locations to map and charts
  • -
  • add EXIF file to CDN
  • -
-
  • a

    Sapkota, Archana and Boult, Terrance. "Large Scale Unconstrained Open Set Face Database." 2013.

    -
  • ab

    Günther, M. et al. "Unconstrained Face Detection and Open-Set Face Recognition Challenge," 2018. arXiv:1708.02337v3.

    -
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/vgg_face2/index.html b/site/public/datasets/vgg_face2/index.html deleted file mode 100644 index 321fb203..00000000 --- a/site/public/datasets/vgg_face2/index.html +++ /dev/null @@ -1,142 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
VGG Face 2
-
- -
-
- -

VGG Face 2

-

[ page under development ]

-
-

Who used VGG Face 2?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how VGG Face 2 has been used around the world by commercial, military, and academic organizations, existing publicly available research citing VGG Face 2 was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-

(ignore) research notes

-
    -
  • The VGG Face 2 dataset includes approximately 1,331 actresses, 139 presidents, 16 wives, 3 husbands, 2 snooker players, and 1 guru
  • -
  • The original VGGF2 name list has been updated with the results returned from Google Knowledge Graph
  • -
  • Names with a similarity score greater than 0.75 were automatically updated. Scores were computed using import difflib; seq = difflib.SequenceMatcher(a=a.lower(), b=b.lower()); score = seq.ratio() (a minimal sketch of this scoring rule follows this list)
  • -
  • The 97 names with a score of 0.75 or lower were manually reviewed. This review includes name changes validated using Wikipedia.org results for names such as "Bruce Jenner" to "Caitlyn Jenner", spousal last-name changes, discretionary changes to improve search results such as combining nicknames with the full name when appropriate, for example changing "Aleksandar Petrović" to "Aleksandar 'Aco' Petrović", and minor changes such as "Mohammad Ali" to "Muhammad Ali"
  • -
  • The 'Description' text was automatically added when the Knowledge Graph score was greater than 250
  • -
-
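For reference, the scoring rule quoted in the notes above can be spelled out as a short, self-contained Python sketch. The name pairs below are examples taken from these notes, not output from the actual VGGF2 review pipeline.
<pre>
import difflib

def name_similarity(a: str, b: str) -> float:
    # the exact rule quoted above: case-insensitive sequence ratio
    seq = difflib.SequenceMatcher(a=a.lower(), b=b.lower())
    return seq.ratio()

for a, b in [("Mohammad Ali", "Muhammad Ali"), ("Bruce Jenner", "Caitlyn Jenner")]:
    score = name_similarity(a, b)
    verdict = "auto-update" if score > 0.75 else "manual review"
    print(f"{a!r} vs {b!r}: {score:.2f} -> {verdict}")
</pre>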

TODO

-
    -
  • create name list, and populate with Knowledge graph information like LFW
  • -
  • make list of interesting number stats, by the numbers
  • -
  • make list of interesting important facts
  • -
  • write intro abstract
  • -
  • write analysis of usage
  • -
  • find examples, citations, and screenshots of usage
  • -
  • find list of companies using it for table
  • -
  • create montages of the dataset, like LFW
  • -
  • create right to removal information
  • -
-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/viper/index.html b/site/public/datasets/viper/index.html deleted file mode 100644 index ffce01fe..00000000 --- a/site/public/datasets/viper/index.html +++ /dev/null @@ -1,122 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
VIPeR
-
- -
-
- -
VIPeR is a person re-identification dataset of images captured at UC Santa Cruz in 2007
VIPeR contains 1,264 images of 632 people captured on the UC Santa Cruz campus and is used to train person re-identification algorithms for surveillance -

VIPeR Dataset

-

[ page under development ]

-

VIPeR (Viewpoint Invariant Pedestrian Recognition) is a dataset of pedestrian images captured at the University of California Santa Cruz in 2007. According to the researchers 2 "cameras were placed in different locations in an academic setting and subjects were notified of the presence of cameras, but were not coached or instructed in any way."

-

VIPeR is amongst the most widely used publicly available person re-identification datasets. In 2017 the VIPeR dataset was combined into a larger person re-identification dataset created by the Chinese University of Hong Kong called PETA (PEdesTrian Attribute).

-
-

Who used VIPeR?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how VIPeR has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Viewpoint Invariant Pedestrian Recognition was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-
- -
- - - - - \ No newline at end of file diff --git a/site/public/datasets/youtube_celebrities/index.html b/site/public/datasets/youtube_celebrities/index.html deleted file mode 100644 index b19add4e..00000000 --- a/site/public/datasets/youtube_celebrities/index.html +++ /dev/null @@ -1,113 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
-
YouTube Celebrities
-
- -
-
- -

YouTube Celebrities

-

[ page under development ]

-
-

Who used YouTube Celebrities?

- -

- This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. -

- -
- -
- -
-
- -
-
-
- -
- -

Biometric Trade Routes

- -

- To help understand how YouTube Celebrities has been used around the world by commercial, military, and academic organizations, existing publicly available research citing YouTube Celebrities was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. -

- -
- -
-
-
- -
-
    -
  • Academic
  • -
  • Commercial
  • -
  • Military / Government
  • -
-
Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.
-
- - -
- -

Dataset Citations

-

- The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. -

- -
-

Notes...

-
    -
  • Selected dataset sequences: (a) MBGC, (b) CMU MoBo, (c) First Honda/UCSD, and (d) YouTube Celebrities.
  • -
  • This research is supported by the Central Intelligence Agency, the Biometrics Task Force and the Technical Support Working Group through US Army contract W91CRB-08-C-0093. The opinions, findings, and conclusions or recommendations expressed in this publication are those of the authors and do not necessarily reflect the views of our sponsors.
  • -
  • in "Face Recognition From Video Draft 17"
  • -
  • International Journal of Pattern Recognition and Artificial Intelligence, World Scientific Publishing Company
  • -
-
- -
- - - - - \ No newline at end of file diff --git a/site/public/index.html b/site/public/index.html deleted file mode 100644 index 118814be..00000000 --- a/site/public/index.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - MegaPixels - - - - - - - - - - -
- - -
MegaPixels
-
- -
-
-
-
-
-
- MegaPixels is a research project by Adam Harvey about facial recognition datasets, developed in partnership with Mozilla. -
-
- MegaPixels ©2017-19 Adam R. Harvey /  - ahprojects.com -
-
- - - \ No newline at end of file diff --git a/site/public/info/index.html b/site/public/info/index.html deleted file mode 100644 index 749c29ba..00000000 --- a/site/public/info/index.html +++ /dev/null @@ -1,50 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

Face Analysis

-

Results are only stored for the duration of the analysis and are deleted when you leave this page.

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/research/00_introduction/index.html b/site/public/research/00_introduction/index.html deleted file mode 100644 index 353e3270..00000000 --- a/site/public/research/00_introduction/index.html +++ /dev/null @@ -1,101 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -
-

00: Introduction

-
-
-
Posted
-
2018-12-15
-
-
-
By
-
Megapixels
-
- -
-
- -
Posted
Dec. 15
Author
Adam Harvey

Facial recognition is a scam.

-

During the last 20 years, commercial, academic, and governmental agencies have promoted the false dream of a future with face recognition. This essay debunks the popular myth that such a thing ever existed.

-

There is no such thing as face recognition. For the last 20 years, government agencies, commercial organizations, and academic institutions have played the public as a fool, selling a roadmap of the future that simply does not exist. Facial recognition, as it is currently defined, promoted, and sold to the public, government, and commercial sector is a scam.

-

Committed to developing robust solutions with superhuman accuracy, the industry has repeatedly undermined itself by never actually developing anything close to "face recognition".

-

There is only biased feature vector clustering and probabilistic thresholding.

-

Motivation

-

Ever since government agencies began developing face recognition in the early 1960s, datasets of face images have always been central to developing and validating face recognition technologies. Today, these datasets no longer originate in labs, but instead come from family photo albums posted on photo sharing sites, surveillance camera footage from college campuses, search engine queries for celebrities, cafe livestreams, or videos on YouTube.

-

During the last year, hundreds of these facial analysis datasets created "in the wild" have been collected to understand how they contribute to a global supply chain of biometric data that is powering the global facial recognition industry.

-

While many of these datasets include public figures such as politicians, athletes, and actors, they also include many non-public figures: digital activists, students, pedestrians, and semi-private shared photo albums are all considered "in the wild" and fair game for research projects. Some images are used with Creative Commons licenses, yet others were taken in unconstrained scenarios without awareness or consent. At first glance it appears many of the datasets were created for seemingly harmless academic research, but when examined further it becomes clear that they're also used by foreign defense agencies.

-

The MegaPixels site is based on an earlier installation (also supported by Mozilla) at the Tactical Tech Glassroom in London in 2017, a commission from the Elevate arts festival curated by Berit Gilma about pedestrian recognition datasets in 2018, and research during CV Dazzle from 2010-2015. Through the many prototypes, conversations, pitches, PDFs, and false starts this project has endured during the last 5 years, it eventually evolved into something much different than originally imagined. Now, as datasets become increasingly influential in shaping the computational future, it's clear that they must be critically analyzed to understand the biases, shortcomings, funding sources, and contributions to the surveillance industry. However, it's misguided to only criticize these datasets for their flaws without also praising their contribution to society. Without publicly available facial analysis datasets there would be less public discourse, less open-source software, and less peer-reviewed research. Public datasets can indeed become a vital public good for the information economy, but as this project aims to illustrate, many ethical questions arise about consent, intellectual property, surveillance, and privacy.

- - - - - -

Ever since the first computational facial recognition research project by the CIA in the early 1960s, data has always played a vital role in the development of our biometric future. Without facial recognition datasets there would be no facial recognition. Datasets are an indispensable part of any artificial intelligence system because, as Geoffrey Hinton points out:

-

Our relationship to computers has changed. Instead of programming them, we now show them and they figure it out. - Geoffrey Hinton

-
-

Algorithms learn from datasets. And we program algorithms by building datasets. But datasets aren't like code. There's no programming language made of data except for the data itself.

-

Ignore content below these lines

-

It was the early 2000s. Face recognition was new and no one seemed sure exactly how well it was going to perform in practice. In theory, face recognition was poised to be a game changer, a force multiplier, a strategic military advantage, a way to make cities safer and to secure borders. This was the future John Ashcroft demanded with the Total Information Awareness act of 2003 and that spooks had dreamed of for decades. It was a future that academics at Carnegie Mellon University and Colorado State University would help build. It was also a future that celebrities would play a significant role in building. And to the surprise of ordinary Internet users like myself and perhaps you, it was a future that millions of Internet users would unwittingly play a role in creating.

-

Now the future has arrived and it doesn't make sense. Facial recognition works yet it doesn't actually work. Facial recognition is cheap and accessible but also expensive and out of control. Facial recognition research has achieved headline-grabbing superhuman accuracies over 99.9% yet facial recognition is also dangerously inaccurate. During a trial installation at Südkreuz station in Berlin in 2018, 20% of the matches were wrong, an accuracy so low that it should have no connection to law enforcement or justice. And in London, the Metropolitan police had been using facial recognition software that mistakenly identified an alarming 98% of people as criminals 1, which perhaps is a crime itself.

-

MegaPixels is an online art project that explores the history of facial recognition from the perspective of datasets. To paraphrase the artist Trevor Paglen, whoever controls the dataset controls the meaning. MegaPixels aims to unravel the meanings behind the data and expose the darker corners of the biometric industry that have contributed to its growth. MegaPixels does not start with a conclusion, a moralistic slant, or a

-

Whether or not to build facial recognition is a question that can no longer be asked. As an outspoken critic of face recognition I've developed, and hopefully furthered, my understanding during the last 10 years I've spent working with computer vision. Though I initially disagreed, I've come to see the technocratic perspective as a non-negotiable reality. As Oren (nytimes article) wrote in an NYT Op-Ed, "the horse is out of the barn" and the only thing we can do collectively or individually is to steer towards the least worst outcome. Computational communication has entered a new era and it's both exciting and frightening to explore the potentials and opportunities. In 1997 getting access to 1 teraFLOPS of computational power would have cost you $55 million and required a strategic partnership with the Department of Defense. At the time of writing, anyone can rent 1 teraFLOPS on a cloud GPU marketplace for less than $1/day. 2.

-

I hope that this project will illuminate the darker areas of the strange world of facial recognition that have not yet received attention and encourage discourse in academia, industry, and . By no means do I believe discourse can save the day. Nor do I think creating artwork can. In fact, I'm not exactly sure what the outcome of this project will be. The project is not so much what I publish here but what happens after. This entire project is only a prologue.

-

As McLuhan wrote, "You can't have a static, fixed position in the electric age". And in our hyper-connected age of mass surveillance, artificial intelligence, and unevenly distributed virtual futures, the most irrational thing to be is rational. Increasingly the world is becoming a contradiction where people use surveillance to protest surveillance, use

-

Like many projects, MegaPixels had spent years meandering between formats and unfeasible budgets, and was generally too niche a subject. The basic idea for this project, as proposed to the original Glass Room installation in 2016 in NYC, was to build an interactive mirror that showed people if they had been included in the LFW facial recognition dataset. The idea was based on my reaction to all the datasets I'd come across during research for the CV Dazzle project. I'd noticed strange datasets created for training and testing face detection algorithms. Most were created in laboratory settings and their interpretation of face data was very strict.

-

for other post

-

It was the early 2000s. Face recognition was new and no one seemed sure how well it was going to perform in practice. In theory, face recognition was poised to be a game changer, a force multiplier, a strategic military advantage, a way to make cities safer and to secure the borders. It was the future that John Ashcroft demanded with the Total Information Awareness act of 2003. It was a future that academics helped build. It was a future that celebrities helped build. And it was a future that

-

A decade earlier the Department of Defense's Counterdrug Technology Development Program Office initiated a feasibility study called FERET (FacE REcognition Technology) to "develop automatic face recognition capabilities that could be employed to assist security, intelligence, and law enforcement personnel in the performance of their duties [^feret_website]."

-

One problem with the FERET dataset was that the photos were taken in controlled settings. For face recognition to work it would have to be used in uncontrolled settings. Even newer datasets such as Multi-PIE (Pose, Illumination, and Expression) from Carnegie Mellon University included only indoor photos of cooperative subjects. Not only were the photos completely unrealistic, CMU's Multi-PIE included only 18 individuals, cost $500 for academic use [^cmu_multipie_cost], took years to create, and required consent from every participant.

-

Add progressive gan of FERET

-
-
-
  1. Sharman, Jon. "Metropolitan Police's facial recognition technology 98% inaccurate, figures show". 2018. https://www.independent.co.uk/news/uk/home-news/met-police-facial-recognition-success-south-wales-trial-home-office-false-positive-a8345036.html

  2. -
  3. Calle, Dan. "Supercomputers". 1997. http://ei.cs.vt.edu/~history/SUPERCOM.Calle.HTML

  4. -
-
-
- -
- - - - - \ No newline at end of file diff --git a/site/public/research/01_from_1_to_100_pixels/index.html b/site/public/research/01_from_1_to_100_pixels/index.html deleted file mode 100644 index 9426ef0f..00000000 --- a/site/public/research/01_from_1_to_100_pixels/index.html +++ /dev/null @@ -1,139 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -
-

From 1 to 100 Pixels

-
-
-
Posted
-
2018-12-04
-
-
-
By
-
Adam Harvey
-
- -
-
- -

High resolution insights from low resolution data

-

This post will be about the meaning of "face". How do people define it? How do biometrics researchers define it? How has it changed during the last decade?

-

What can you know from a very small amount of information?

-
    -
  • 1 pixel grayscale
  • -
  • 2x2 pixels grayscale, font example, can encode letters
  • -
  • 3x3 pixels: can create a font
  • -
  • 4x4 pixels: how many variations
  • -
  • 8x8 yotta yotta, many more variations
  • -
  • 5x7 face recognition
  • -
  • 12x16 activity recognition
  • -
  • 6/5 (up to 124/106) pixels in height/width, and the average is 24/20 for QMUL SurvFace
  • -
  • (prepare a Progan render of the QMUL dataset and TinyFaces)
  • -
  • 20x16 tiny faces paper
  • -
  • 20x20 MNIST handwritten images http://yann.lecun.com/exdb/mnist/
  • -
  • 24x24 haarcascade detector idealized images
  • -
  • 32x32 CIFAR image dataset
  • -
  • 40x40 can do emotion detection, face recognition at scale, 3d modeling of the face. include datasets with faces at this resolution including pedestrian.
  • -
  • NIST standards begin to appear from 40x40, distinguish ocular pixels
  • -
  • need more material from 60-100
  • -
  • 60x60 show how texture emerges and pupils, eye color, higher resolution of features and compare to lower resolution faces
  • -
  • 100x100 all you need for medical diagnosis
  • -
  • 100x100 0.5% of one Instagram photo
  • -
-

Ideas:

-
    -
  • Find specific cases of facial resolution being used in legal cases, forensic investigations, or military footage
  • -
  • resolution of boston bomber face
  • -
  • resolution of the state of the union image
  • -
-

Research

-
    -
  • NIST report on super-resolution states several resolutions
  • -
  • "Results show that the tested face recognition systems yielded similar performance for query sets with eye-to-eye distance from 60 pixels to 30 pixels" 1
  • -
-
    -
  • "Note that we only keep the images with a minimal side length of 80 pixels." and "a face will be labeled as “Ignore” if it is very difficult to be detected due to blurring, severe deformation and unrecognizable eyes, or the side length of its bounding box is less than 32 pixels." Ge_Detecting_Masked_Faces_CVPR_2017_paper.pdf
  • -
  • IBM DiF: "Faces with region size less than 50x50 or inter-ocular distance of less than 30 pixels were discarded. Faces with non-frontal pose, or anything beyond being slightly tilted to the left or the right, were also discarded." (a toy version of this filter is sketched below the list)
  • -
-
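As a toy illustration of quality filters like the IBM DiF rule quoted above, the sketch below rejects face regions smaller than 50x50 pixels or with an inter-ocular distance under 30 pixels. The function name and thresholds-as-arguments are assumptions for illustration, not IBM's actual code.
<pre>
def keep_face(width_px: int, height_px: int, inter_ocular_px: int) -> bool:
    """Toy version of the DiF filtering rule quoted above."""
    if width_px < 50 or height_px < 50:   # region too small
        return False
    return inter_ocular_px >= 30          # otherwise eyes too close together

assert keep_face(64, 64, 32) is True
assert keep_face(48, 64, 32) is False     # region below 50x50
assert keep_face(64, 64, 24) is False     # inter-ocular distance below 30
</pre>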

As the resolution -formatted as rectangular databases of 16 bit RGB-tuples or 8 bit grayscale values

-

To consider how visual privacy applies to real world surveillance situations, the first

-

A single 8-bit grayscale pixel with 256 values is enough to represent all 62 alphanumeric characters (a-z, A-Z, 0-9) with room to spare.

-
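A toy sketch of that claim, mapping the 62 alphanumeric characters onto evenly spaced gray values; this illustrates the information capacity of one pixel, and is not a proposal for an actual encoding scheme.
<pre>
import string

ALPHABET = string.ascii_lowercase + string.ascii_uppercase + string.digits
assert len(ALPHABET) == 62          # well under the 256 available gray levels
STEP = 255 // (len(ALPHABET) - 1)   # spread the codes across 0-255

def char_to_pixel(c: str) -> int:
    """Encode one character as a single 8-bit gray value."""
    return ALPHABET.index(c) * STEP

def pixel_to_char(v: int) -> str:
    """Decode a gray value back to its character."""
    return ALPHABET[v // STEP]

# every character survives the round trip through one pixel
assert all(pixel_to_char(char_to_pixel(c)) == c for c in ALPHABET)
</pre>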

A 2x2 pixel image contains

-

Using no more than a 42-pixel (6x7) face image, researchers [cite] were able to correctly distinguish between a group of 50 people. Yet

-

The likely outcome of face recognition research is that more data is needed to improve. Indeed, resolution is the determining factor for all biometric systems, both as training data to increase

-

Pixels, typically considered the building blocks of images and videos, can also be plotted as a graph of sensor values corresponding to the intensity of RGB-calibrated sensors.

-

Wi-Fi and cameras present elevated risks for transmitting videos and image documentation from conflict zones, high-risk situations, or even sharing on social media. How can new developments in computer vision also be used in reverse, as a counter-forensic tool, to minimize an individual's privacy risk?

-

As the global Internet becomes increasingly efficient at turning itself into a giant dataset for machine learning, forensics, and data analysis, it would be prudent to also consider tools for decreasing the resolution. The Visual Defense module is just that. What are new ways to minimize the adverse effects of surveillance by dulling the blade? For example, a research paper showed that by decreasing a face size to 12x16 pixels it was still possible to achieve 98% accuracy with 50 people. This is clearly an example of

-

This research module, tentatively called Visual Defense Tools, aims to explore the

-

Prior Research

-
    -
  • MPI visual privacy advisor
  • -
  • NIST: super resolution
  • -
  • YouTube blur tool
  • -
  • WITNESS: blur tool
  • -
  • Pixellated text
  • -
  • CV Dazzle
  • -
  • Bellingcat guide to geolocation
  • -
  • Peng! magic passport
  • -
-

Notes

-
    -
  • In China, out of the approximately 200 million surveillance cameras only about 15% have enough resolution for face recognition.
  • -
  • In Apple's FaceID security guide, the probability of someone else's face unlocking your phone is 1 out of 1,000,000.
  • -
  • In England, the Metropolitan Police reported a false-positive match rate of 98% when attempting to use face recognition to locate wanted criminals.
  • -
  • In a face recognition trial at Berlin's Südkreuz station, the false-match rate was 20%.
  • -
-

What all 3 examples illustrate is that face recognition is anything but absolute. In a 2017 talk, Jason Matheny, the former director of IARPA, admitted that face recognition is so brittle it can be subverted by using a magic marker and drawing "a few dots on your forehead". In fact face recognition is a misleading term. Face recognition is a search engine for faces that can only ever show you the most likely match. This presents a real threat to privacy and lends

-

Globally, iPhone users relying on FaceID and TouchID to protect their information unwittingly agree to a 1/1,000,000 false-match probability.

-
-
-
  1. NIST 906932. Performance Assessment of Face Recognition Using Super-Resolution. Shuowen Hu, Robert Maschal, S. Susan Young, Tsai Hong Hong, Jonathon P. Phillips

  2. -
-
-
- -
- - - - - \ No newline at end of file diff --git a/site/public/research/02_what_computers_can_see/index.html b/site/public/research/02_what_computers_can_see/index.html deleted file mode 100644 index 920f78cc..00000000 --- a/site/public/research/02_what_computers_can_see/index.html +++ /dev/null @@ -1,310 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -
-

What Computers Can See

-
-
-
Posted
-
2018-12-15
-
-
-
By
-
Adam Harvey
-
- -
-
- -

A list of 100 things computer vision can see, e.g.:

-
    -
  • age, race, gender, ancestral origin, body mass index
  • -
  • eye color, hair color, facial hair, glasses
  • -
  • beauty score,
  • -
  • intelligence
  • -
  • what you're looking at
  • -
  • medical conditions
  • -
  • tired, drowsiness in car
  • -
  • affectiva: interest in product, intent to buy
  • -
-

From PubFig Dataset

-
    -
  • Male
  • -
  • Asian
  • -
  • White
  • -
  • Black
  • -
  • Baby
  • -
  • Child
  • -
  • Youth
  • -
  • Middle Aged
  • -
  • Senior
  • -
  • Black Hair
  • -
  • Blond Hair
  • -
  • Brown Hair
  • -
  • Bald
  • -
  • No Eyewear
  • -
  • Eyeglasses
  • -
  • Sunglasses
  • -
  • Mustache
  • -
  • Smiling
  • -
  • Frowning
  • -
  • Chubby
  • -
  • Blurry
  • -
  • Harsh Lighting
  • -
  • Flash
  • -
  • Soft Lighting
  • -
  • Outdoor
  • -
  • Curly Hair
  • -
  • Wavy Hair
  • -
  • Straight Hair
  • -
  • Receding Hairline
  • -
  • Bangs
  • -
  • Sideburns
  • -
  • Fully Visible Forehead
  • -
  • Partially Visible Forehead
  • -
  • Obstructed Forehead
  • -
  • Bushy Eyebrows
  • -
  • Arched Eyebrows
  • -
  • Narrow Eyes
  • -
  • Eyes Open
  • -
  • Big Nose
  • -
  • Pointy Nose
  • -
  • Big Lips
  • -
  • Mouth Closed
  • -
  • Mouth Slightly Open
  • -
  • Mouth Wide Open
  • -
  • Teeth Not Visible
  • -
  • No Beard
  • -
  • Goatee
  • -
  • Round Jaw
  • -
  • Double Chin
  • -
  • Wearing Hat
  • -
  • Oval Face
  • -
  • Square Face
  • -
  • Round Face
  • -
  • Color Photo
  • -
  • Posed Photo
  • -
  • Attractive Man
  • -
  • Attractive Woman
  • -
  • Indian
  • -
  • Gray Hair
  • -
  • Bags Under Eyes
  • -
  • Heavy Makeup
  • -
  • Rosy Cheeks
  • -
  • Shiny Skin
  • -
  • Pale Skin
  • -
  • 5 o' Clock Shadow
  • -
  • Strong Nose-Mouth Lines
  • -
  • Wearing Lipstick
  • -
  • Flushed Face
  • -
  • High Cheekbones
  • -
  • Brown Eyes
  • -
  • Wearing Earrings
  • -
  • Wearing Necktie
  • -
  • Wearing Necklace
  • -
-

for i in {1..9}; do wget http://visiond1.cs.umbc.edu/webpage/codedata/ADLdataset/ADL_videos/P_0$i.MP4; done
for i in {10..20}; do wget http://visiond1.cs.umbc.edu/webpage/codedata/ADLdataset/ADL_videos/P_$i.MP4; done

-

From Market 1501

-

The 27 attributes are:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| attribute | representation in file | label |
| --- | --- | --- |
| gender | gender | male(1), female(2) |
| hair length | hair | short hair(1), long hair(2) |
| sleeve length | up | long sleeve(1), short sleeve(2) |
| length of lower-body clothing | down | long lower body clothing(1), short(2) |
| type of lower-body clothing | clothes | dress(1), pants(2) |
| wearing hat | hat | no(1), yes(2) |
| carrying backpack | backpack | no(1), yes(2) |
| carrying bag | bag | no(1), yes(2) |
| carrying handbag | handbag | no(1), yes(2) |
| age | age | young(1), teenager(2), adult(3), old(4) |
| 8 color of upper-body clothing | upblack, upwhite, upred, uppurple, upyellow, upgray, upblue, upgreen | no(1), yes(2) |
| 9 color of lower-body clothing | downblack, downwhite, downpink, downpurple, downyellow, downgray, downblue, downgreen, downbrown | no(1), yes(2) |
-

source: https://github.com/vana77/Market-1501_Attribute/blob/master/README.md

-
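A small sketch of how the numeric codes above might be decoded, assuming each person's annotations have been read into a plain dict keyed by the "representation in file" names (for example from the .mat file in the linked repository); the helper names are hypothetical. The DukeMTMC-attribute codes below follow the same convention.
<pre>
BINARY = {1: False, 2: True}   # no(1), yes(2)
AGE = {1: "young", 2: "teenager", 3: "adult", 4: "old"}

def decode(person: dict) -> dict:
    """Map raw Market-1501 attribute codes to readable labels."""
    out = {"gender": "male" if person["gender"] == 1 else "female",
           "age": AGE[person["age"]]}
    for key, value in person.items():
        if key not in ("gender", "age"):
            out[key] = BINARY[value]   # two-valued codes; 2 means yes
    return out

print(decode({"gender": 2, "age": 3, "hat": 1, "backpack": 2}))
# {'gender': 'female', 'age': 'adult', 'hat': False, 'backpack': True}
</pre>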

From DukeMTMC

-

The 23 attributes are:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| attribute | representation in file | label |
| --- | --- | --- |
| gender | gender | male(1), female(2) |
| length of upper-body clothing | top | short upper body clothing(1), long(2) |
| wearing boots | boots | no(1), yes(2) |
| wearing hat | hat | no(1), yes(2) |
| carrying backpack | backpack | no(1), yes(2) |
| carrying bag | bag | no(1), yes(2) |
| carrying handbag | handbag | no(1), yes(2) |
| color of shoes | shoes | dark(1), light(2) |
| 8 color of upper-body clothing | upblack, upwhite, upred, uppurple, upgray, upblue, upgreen, upbrown | no(1), yes(2) |
| 7 color of lower-body clothing | downblack, downwhite, downred, downgray, downblue, downgreen, downbrown | no(1), yes(2) |
-

source: https://github.com/vana77/DukeMTMC-attribute/blob/master/README.md

-

From H3D Dataset

-

  • The joints and other keypoints (eyes, ears, nose, shoulders, elbows, wrists, hips, knees and ankles)
  • The 3D pose inferred from the keypoints
  • Visibility boolean for each keypoint
  • Region annotations (upper clothes, lower clothes, dress, socks, shoes, hands, gloves, neck, face, hair, hat, sunglasses, bag, occluder)
  • Body type (male, female or child)

-

source: https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/shape/h3d/

-

From Leeds Sports Pose

-

=INDEX(A2:A9, MATCH(datasets!D1, B2:B9, 0))
=VLOOKUP(A2, datasets!A:J, 7, FALSE)

-

  • Right ankle
  • Right knee
  • Right hip
  • Left hip
  • Left knee
  • Left ankle
  • Right wrist
  • Right elbow
  • Right shoulder
  • Left shoulder
  • Left elbow
  • Left wrist
  • Neck
  • Head top

-

source: http://web.archive.org/web/20170915023005/sam.johnson.io/research/lsp.html

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/research/index.html b/site/public/research/index.html deleted file mode 100644 index 1be8203f..00000000 --- a/site/public/research/index.html +++ /dev/null @@ -1,49 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

Research Blog

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/test/chart/index.html b/site/public/test/chart/index.html deleted file mode 100644 index e882ecc5..00000000 --- a/site/public/test/chart/index.html +++ /dev/null @@ -1,50 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

Chart

-

← Back to test index

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/test/citations/index.html b/site/public/test/citations/index.html deleted file mode 100644 index a8af41df..00000000 --- a/site/public/test/citations/index.html +++ /dev/null @@ -1,50 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

Citations

-

← Back to test index

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/test/csv/index.html b/site/public/test/csv/index.html deleted file mode 100644 index 2c2242b4..00000000 --- a/site/public/test/csv/index.html +++ /dev/null @@ -1,50 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

CSV Test

-

← Back to test index

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/test/datasets/index.html b/site/public/test/datasets/index.html deleted file mode 100644 index bf08418f..00000000 --- a/site/public/test/datasets/index.html +++ /dev/null @@ -1,50 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

Index of datasets

-

← Back to test index

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/test/face_search/index.html b/site/public/test/face_search/index.html deleted file mode 100644 index 75bb907b..00000000 --- a/site/public/test/face_search/index.html +++ /dev/null @@ -1,50 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

Face search

-

← Back to test index

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/test/gallery/index.html b/site/public/test/gallery/index.html deleted file mode 100644 index 8958f369..00000000 --- a/site/public/test/gallery/index.html +++ /dev/null @@ -1,68 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

Gallery test

-

← Back to test index

-
Modal image 1
Modal image 1
-
Modal image 2
Modal image 2
-
Modal image 3
Modal image 3

Test table

- - - - - - - - - - - - - - -
Col1Col2Col3
Content1Content2Content3
-
- -
- - - - - \ No newline at end of file diff --git a/site/public/test/index.html b/site/public/test/index.html deleted file mode 100644 index e660bb2d..00000000 --- a/site/public/test/index.html +++ /dev/null @@ -1,61 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
- - - - - - \ No newline at end of file diff --git a/site/public/test/map/index.html b/site/public/test/map/index.html deleted file mode 100644 index 21229ec1..00000000 --- a/site/public/test/map/index.html +++ /dev/null @@ -1,50 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

Map test

-

← Back to test index

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/test/name_search/index.html b/site/public/test/name_search/index.html deleted file mode 100644 index b0bdb86f..00000000 --- a/site/public/test/name_search/index.html +++ /dev/null @@ -1,50 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

Name search

-

← Back to test index

-
- -
- - - - - \ No newline at end of file diff --git a/site/public/test/pie_chart/index.html b/site/public/test/pie_chart/index.html deleted file mode 100644 index 98a89ff4..00000000 --- a/site/public/test/pie_chart/index.html +++ /dev/null @@ -1,50 +0,0 @@ - - - - MegaPixels - - - - - - - - - - - -
- - -
MegaPixels
- -
- -
-
- -

Pie Chart

-

← Back to test index

-
- -
- - - - - \ No newline at end of file -- cgit v1.2.3-70-g09d2 From 828ab34ca5e01e03e055ef9e091a88cd516a6061 Mon Sep 17 00:00:00 2001 From: adamhrv Date: Mon, 15 Apr 2019 14:08:35 +0200 Subject: fix up duke --- site/assets/css/applets.css | 2 +- site/assets/css/css.css | 60 ++-- site/content/pages/datasets/duke_mtmc/index.md | 78 ++++-- site/content/pages/datasets/msceleb/index.md | 4 + site/content/pages/datasets/uccs/index.md | 17 +- site/public/about/assets/LICENSE/index.html | 58 ++++ site/public/about/attribution/index.html | 78 ++++++ site/public/about/index.html | 107 +++++++ site/public/about/legal/index.html | 108 +++++++ site/public/about/press/index.html | 59 ++++ .../datasets/50_people_one_question/index.html | 114 ++++++++ site/public/datasets/afad/index.html | 127 +++++++++ site/public/datasets/brainwash/index.html | 163 +++++++++++ site/public/datasets/caltech_10k/index.html | 124 +++++++++ site/public/datasets/celeba/index.html | 126 +++++++++ site/public/datasets/cofw/index.html | 179 ++++++++++++ site/public/datasets/duke_mtmc/index.html | 139 +++++++-- site/public/datasets/feret/index.html | 87 ++++++ site/public/datasets/hrt_transgender/index.html | 2 +- site/public/datasets/lfpw/index.html | 116 ++++++++ site/public/datasets/lfw/index.html | 2 +- site/public/datasets/market_1501/index.html | 132 +++++++++ site/public/datasets/msceleb/index.html | 4 +- site/public/datasets/oxford_town_centre/index.html | 29 +- site/public/datasets/pipa/index.html | 120 ++++++++ site/public/datasets/pubfig/index.html | 117 ++++++++ site/public/datasets/uccs/index.html | 274 ++++++++++++++++++ site/public/datasets/vgg_face2/index.html | 142 ++++++++++ site/public/datasets/viper/index.html | 122 ++++++++ .../public/datasets/youtube_celebrities/index.html | 113 ++++++++ site/public/index.html | 39 +++ site/public/info/index.html | 50 ++++ site/public/research/00_introduction/index.html | 101 +++++++ .../research/01_from_1_to_100_pixels/index.html | 139 +++++++++ .../research/02_what_computers_can_see/index.html | 310 +++++++++++++++++++++ site/public/research/index.html | 49 ++++ site/public/test/chart/index.html | 50 ++++ site/public/test/citations/index.html | 50 ++++ site/public/test/csv/index.html | 50 ++++ site/public/test/datasets/index.html | 50 ++++ site/public/test/face_search/index.html | 50 ++++ site/public/test/gallery/index.html | 68 +++++ site/public/test/index.html | 61 ++++ site/public/test/map/index.html | 50 ++++ site/public/test/name_search/index.html | 50 ++++ site/public/test/pie_chart/index.html | 50 ++++ 46 files changed, 3947 insertions(+), 73 deletions(-) create mode 100644 site/public/about/assets/LICENSE/index.html create mode 100644 site/public/about/attribution/index.html create mode 100644 site/public/about/index.html create mode 100644 site/public/about/legal/index.html create mode 100644 site/public/about/press/index.html create mode 100644 site/public/datasets/50_people_one_question/index.html create mode 100644 site/public/datasets/afad/index.html create mode 100644 site/public/datasets/brainwash/index.html create mode 100644 site/public/datasets/caltech_10k/index.html create mode 100644 site/public/datasets/celeba/index.html create mode 100644 site/public/datasets/cofw/index.html create mode 100644 site/public/datasets/feret/index.html create mode 100644 site/public/datasets/lfpw/index.html create mode 100644 site/public/datasets/market_1501/index.html create mode 100644 site/public/datasets/pipa/index.html create mode 100644 
site/public/datasets/pubfig/index.html create mode 100644 site/public/datasets/uccs/index.html create mode 100644 site/public/datasets/vgg_face2/index.html create mode 100644 site/public/datasets/viper/index.html create mode 100644 site/public/datasets/youtube_celebrities/index.html create mode 100644 site/public/index.html create mode 100644 site/public/info/index.html create mode 100644 site/public/research/00_introduction/index.html create mode 100644 site/public/research/01_from_1_to_100_pixels/index.html create mode 100644 site/public/research/02_what_computers_can_see/index.html create mode 100644 site/public/research/index.html create mode 100644 site/public/test/chart/index.html create mode 100644 site/public/test/citations/index.html create mode 100644 site/public/test/csv/index.html create mode 100644 site/public/test/datasets/index.html create mode 100644 site/public/test/face_search/index.html create mode 100644 site/public/test/gallery/index.html create mode 100644 site/public/test/index.html create mode 100644 site/public/test/map/index.html create mode 100644 site/public/test/name_search/index.html create mode 100644 site/public/test/pie_chart/index.html (limited to 'site/public/datasets/feret/index.html') diff --git a/site/assets/css/applets.css b/site/assets/css/applets.css index e48af7b1..ef9f09e2 100644 --- a/site/assets/css/applets.css +++ b/site/assets/css/applets.css @@ -9,7 +9,7 @@ min-height: 0; } .applet { - margin-bottom: 40px; + margin-bottom: 60px; transition: opacity 0.2s cubic-bezier(0,0,1,1); opacity: 0; } diff --git a/site/assets/css/css.css b/site/assets/css/css.css index a61a1875..58f4d490 100644 --- a/site/assets/css/css.css +++ b/site/assets/css/css.css @@ -181,27 +181,34 @@ h2 { font-weight: 400; font-size: 34px; line-height: 43px; - margin: 20px 0 20px; + margin: 20px auto 20px auto; padding: 0; transition: color 0.1s cubic-bezier(0,0,1,1); font-family: 'Roboto Mono', monospace; } h3 { - margin: 0 0 18px 0; - padding: 20px 0 0 0; + margin: 20px auto 10px auto; font-size: 28px; font-weight: 400; transition: color 0.1s cubic-bezier(0,0,1,1); font-family: 'Roboto Mono', monospace; } h4 { - margin: 0 0 10px 0; + margin: 6px auto 10px auto; padding: 0; font-size: 18pt; font-weight: 400; transition: color 0.1s cubic-bezier(0,0,1,1); font-family: 'Roboto Mono', monospace; } +h5 { + margin: 6px auto 10px auto; + padding: 0; + font-size: 14pt; + font-weight: 400; + transition: color 0.1s cubic-bezier(0,0,1,1); + font-family: 'Roboto Mono', monospace; +} .content h3 a { color: #888; text-decoration: none; @@ -256,13 +263,25 @@ section { margin-top: 30px; line-height: 36px; } -p { - margin: 0 10px 20px 0; +section p { + margin: 10px auto 20px auto; line-height: 2; - font-size: 18px; + font-size: 17px; font-weight: 300; color: #dedede; } +section ul { + margin: 10px auto 20px auto; + max-width: 720px; +} +section h1, section h2, section h3, section h4, section h5, section h6, section p{ + max-width: 720px; +} + +.content-dataset section:nth-child(2) p:first-child{ + font-size:19px; + color:red; +} p.subp{ font-size: 14px; } @@ -299,10 +318,9 @@ p.subp{ .right-sidebar { float: right; width: 240px; - margin-left: 20px; - padding-top: 10px; + margin: 75px 20px 20px 20px; padding-left: 20px; - border-left: 1px solid #444; + border-left: 1px solid #333; font-family: 'Roboto'; font-size: 14px; font-weight: 400; @@ -363,12 +381,14 @@ code { background: rgba(255,255,255,0.1); } pre { - margin: 0 0 40px 0; + margin: 0 auto; + max-width: 720px; border: 1px solid #666; 
border-radius: 2px; padding: 10px; display: block; background: #333; + overflow: auto } pre code { display: block; @@ -450,7 +470,17 @@ blockquote { .footnotes hr { display: none; } -ul.footnotes li, ul.footnotes p{ +ul.footnotes{ + max-width:720px; + margin:0 auto; + font-size:12px; +} +ul.footnotes li{ + font-size:12px; + list-style-type: decimal; + margin-bottom:12px; +} +ul.footnotes p{ font-size:12px; } .footnotes ol:before { @@ -1060,10 +1090,6 @@ a.footnote_shim { bottom: 5px; margin-right: 2px; } -ul.footnotes { - list-style-type: decimal; - margin-left: 30px; -} li p { margin: 0; padding: 0; display: inline; @@ -1072,6 +1098,8 @@ li p { /* citation browser */ .citationHeader { + max-width: 720px; + margin: 10px auto; padding-bottom: 10px } .download-btn{ diff --git a/site/content/pages/datasets/duke_mtmc/index.md b/site/content/pages/datasets/duke_mtmc/index.md index 8308eee7..28c586f9 100644 --- a/site/content/pages/datasets/duke_mtmc/index.md +++ b/site/content/pages/datasets/duke_mtmc/index.md @@ -18,17 +18,19 @@ authors: Adam Harvey ## Duke MTMC -The Duke Multi-Target, Multi-Camera Tracking Dataset (MTMC) is a dataset of video recorded on Duke University campus for research and development of networked camera surveillance systems. MTMC tracking is used for citywide dragnet surveillance systems such as those used throughout China by SenseTime[^sensetime_qz] and the oppressive monitoring of 2.5 million Uyghurs in Xinjiang by SenseNets[^sensenets_uyghurs]. In fact researchers from both SenseTime[^sensetime1] [^sensetime2] and SenseNets[^sensenets_sensetime] used the Duke MTMC dataset for their research. +Duke MTMC (Multi-Target, Multi-Camera Tracking) is a dataset of video recorded on Duke University campus for research and development of networked camera surveillance systems. MTMC tracking algorithms are used for citywide dragnet surveillance systems such as those used throughout China by SenseTime[^sensetime_qz] and the oppressive monitoring of 2.5 million Uyghurs in Xinjiang by SenseNets[^sensenets_uyghurs]. In fact researchers from both SenseTime[^sensetime1] [^sensetime2] and SenseNets[^sensenets_sensetime] used the Duke MTMC dataset for their research. -The Duke MTMC dataset is unique because it is the largest publicly available MTMC and person re-identification dataset and has the longest duration of annotated video. In total, the Duke MTMC dataset provides over 14 hours of 1080p video from 8 synchronized surveillance cameras.[^duke_mtmc_orig] It is among the most widely used person re-identification datasets in the world. The approximately 2,700 unique people in the Duke MTMC videos, most of whom are students, are used for research and development of surveillance technologies by commercial, academic, and even defense organizations. +In this investigation into the Duke MTMC dataset, we found that researchers at Duke Univesity in Durham, North Carolina captured over 2,000 students, faculty members, and passersby into one of the most prolific public surveillance research datasets that's used around the world by commercial and defense surveillance organizations. -![caption: A collection of 1,600 out of the 2,700 students and passersby captured into the Duke MTMC surveillance research dataset. These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification. 
© megapixels.cc](assets/duke_mtmc_reid_montage.jpg) +Since it's publication in 2016, the Duke MTMC dataset has been used in over 100 studies at organizations around the world including SenseTime[^sensetime1] [^sensetime2], SenseNets[^sensenets_sensetime], IARPA and IBM[^iarpa_ibm], Chinese National University of Defense [^cn_defense1][^cn_defense2], US Department of Homeland Security[^us_dhs], Tencent, Microsoft, Microsft Asia, Fraunhofer, Senstar Corp., Alibaba, Naver Labs, Google and Hewlett-Packard Labs to name only a few. -The creation and publication of the Duke MTMC dataset in 2016 was originally funded by the U.S. Army Research Laboratory and the National Science Foundation[^duke_mtmc_orig]. Since 2016 use of the Duke MTMC dataset images have been publicly acknowledged in research funded by or on behalf of the Chinese National University of Defense[^cn_defense1][^cn_defense2], IARPA and IBM[^iarpa_ibm], and U.S. Department of Homeland Security[^us_dhs]. +The creation and publication of the Duke MTMC dataset in 2014 (published in 2016) was originally funded by the U.S. Army Research Laboratory and the National Science Foundation[^duke_mtmc_orig]. Though our analysis of the geographic locations of the publicly available research shows over twice as many citations by researchers from China (44% China, 20% United States). In 2018 alone, there were 70 research project citations from China. -The 8 cameras deployed on Duke's campus were specifically setup to capture students "during periods between lectures, when pedestrian traffic is heavy".[^duke_mtmc_orig] Camera 7 and 2 capture large groups of prospective students and children. Camera 5 was positioned to capture students as they enter and exit Duke University's main chapel. Each camera's location is documented below. +![caption: A collection of 1,600 out of the 2,700 students and passersby captured into the Duke MTMC surveillance research and development dataset on . These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification. Open Data Commons Attribution License.](assets/duke_mtmc_reid_montage.jpg) -![caption: Duke MTMC camera locations on Duke University campus © megapixels.cc](assets/duke_mtmc_camera_map.jpg) +The 8 cameras deployed on Duke's campus were specifically setup to capture students "during periods between lectures, when pedestrian traffic is heavy".[^duke_mtmc_orig]. Camera 5 was positioned to capture students as entering and exiting the university's main chapel. Each camera's location and approximate field of view. The heat map visualization shows the locations where pedestrians were most frequently annotated in each video from the Duke MTMC datset. + +![caption: Duke MTMC camera locations on Duke University campus. Open Data Commons Attribution License.](assets/duke_mtmc_camera_map.jpg) ![caption: Duke MTMC camera views for 8 cameras deployed on campus © megapixels.cc](assets/duke_mtmc_cameras.jpg) @@ -39,23 +41,67 @@ The 8 cameras deployed on Duke's campus were specifically setup to capture stude {% include 'supplementary_header.html' %} +#### Funding + +Original funding for the Duke MTMC dataset was provided by the Army Research Office under Grant No. W911NF-10-1-0387 and by the National Science Foundation +under Grants IIS-10-17017 and IIS-14-20894. + +#### Video Timestamps + +The video timestamps contain the likely, but not yet confirmed, date and times of capture. 
Because the video timestamps align with the start and stop [time sync data](http://vision.cs.duke.edu/DukeMTMC/details.html#time-sync) provided by the researchers, it at least aligns the relative time. The [rainy weather](https://www.wunderground.com/history/daily/KIGX/date/2014-3-19?req_city=Durham&req_state=NC&req_statename=North%20Carolina&reqdb.zip=27708&reqdb.magic=1&reqdb.wmo=99999) on that day also contribute towards the likelihood of March 14, 2014.. + +=== columns 2 + +| Camera | Date | Start | End | +| --- | --- | --- | --- | +| Camera 1 | March 14, 2014 | 4:14PM | 5:43PM | +| Camera 2 | March 14, 2014 | 4:13PM | 4:43PM | +| Camera 3 | March 14, 2014 | 4:20PM | 5:48PM | +| Camera 4 | March 14, 2014 | 4:21PM | 5:54PM | + +=========== -### Notes +| Camera | Date | Start | End | +| --- | --- | --- | --- | +| Camera 5 | March 14, 2014 | 4:12PM | 5:43PM | +| Camera 6 | March 14, 2014 | 4:18PM | 5:43PM | +| Camera 7 | March 14, 2014 | 4:16PM | 5:40PM | +| Camera 8 | March 14, 2014 | 4:25PM | 5:42PM | + +=== end columns + + +### Opting Out + +If you attended Duke University and were captured by any of the 8 surveillance cameras positioned on campus in 2014, there is unfortunately no way to be removed. The dataset files have been distributed throughout the world and it would not be possible to contact all the owners for removal. Nor do the authors provide any options for students to opt-out, nor did they even inform students they would be used at test subjects for surveillance research and development in a project funded, in part, by the United States Army Research Office. + +#### Notes + +- The Duke MTMC dataset paper mentions 2,700 identities, but their ground truth file only lists annotations for 1,812 -The Duke MTMC dataset paper mentions 2,700 identities, but their ground truth file only lists annotations for 1,812 {% include 'cite_our_work.html' %} +If you use any data from the Duke MTMC please follow their [license](http://vision.cs.duke.edu/DukeMTMC/#how-to-cite) and cite their work as: + +
+@inproceedings{ristani2016MTMC,
+  title =        {Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking},
+  author =       {Ristani, Ergys and Solera, Francesco and Zou, Roger and Cucchiara, Rita and Tomasi, Carlo},
+  booktitle =    {European Conference on Computer Vision workshop on Benchmarking Multi-Target Tracking},
+  year =         {2016}
+}
+
### Footnotes [^sensetime_qz]: [^sensenets_uyghurs]: -[^sensenets_sensetime]: "Attention-Aware Compositional Network for Person Re-identification". 2018. [Source](https://www.semanticscholar.org/paper/Attention-Aware-Compositional-Network-for-Person-Xu-Zhao/14ce502bc19b225466126b256511f9c05cadcb6e) -[^sensetime1]: "End-to-End Deep Kronecker-Product Matching for Person Re-identification". 2018. [source](https://www.semanticscholar.org/paper/End-to-End-Deep-Kronecker-Product-Matching-for-Shen-Xiao/947954cafdefd471b75da8c3bb4c21b9e6d57838) -[^sensetime2]: "Person Re-identification with Deep Similarity-Guided Graph Neural Network". 2018. [Source](https://www.semanticscholar.org/paper/Person-Re-identification-with-Deep-Graph-Neural-Shen-Li/08d2a558ea2deb117dd8066e864612bf2899905b) -[^duke_mtmc_orig]: "Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking". 2016. [Source](https://www.semanticscholar.org/paper/Performance-Measures-and-a-Data-Set-for-Tracking-Ristani-Solera/27a2fad58dd8727e280f97036e0d2bc55ef5424c) -[^cn_defense1]: "Tracking by Animation: Unsupervised Learning of Multi-Object Attentive Trackers". 2018. [Source](https://www.semanticscholar.org/paper/Tracking-by-Animation%3A-Unsupervised-Learning-of-He-Liu/e90816e1a0e14ea1e7039e0b2782260999aef786) -[^cn_defense2]: "Unsupervised Multi-Object Detection for Video Surveillance Using Memory-Based Recurrent Attention Networks". 2018. [Source](https://www.semanticscholar.org/paper/Unsupervised-Multi-Object-Detection-for-Video-Using-He-He/59f357015054bab43fb8cbfd3f3dbf17b1d1f881) -[^iarpa_ibm]: "Horizontal Pyramid Matching for Person Re-identification". 2019. [Source](https://www.semanticscholar.org/paper/Horizontal-Pyramid-Matching-for-Person-Fu-Wei/c2a5f27d97744bc1f96d7e1074395749e3c59bc8) -[^us_dhs]: "Re-Identification with Consistent Attentive Siamese Networks". 2018. [Source](https://www.semanticscholar.org/paper/Re-Identification-with-Consistent-Attentive-Siamese-Zheng-Karanam/24d6d3adf2176516ef0de2e943ce2084e27c4f94) \ No newline at end of file +[^sensenets_sensetime]: "Attention-Aware Compositional Network for Person Re-identification". 2018. [SemanticScholar](https://www.semanticscholar.org/paper/Attention-Aware-Compositional-Network-for-Person-Xu-Zhao/14ce502bc19b225466126b256511f9c05cadcb6e), [PDF](http://openaccess.thecvf.com/content_cvpr_2018/papers/Xu_Attention-Aware_Compositional_Network_CVPR_2018_paper.pdf) +[^sensetime1]: "End-to-End Deep Kronecker-Product Matching for Person Re-identification". 2018. [SemanticScholar](https://www.semanticscholar.org/paper/End-to-End-Deep-Kronecker-Product-Matching-for-Shen-Xiao/947954cafdefd471b75da8c3bb4c21b9e6d57838), [PDF](http://openaccess.thecvf.com/content_cvpr_2018/papers/Shen_End-to-End_Deep_Kronecker-Product_CVPR_2018_paper.pdf) +[^sensetime2]: "Person Re-identification with Deep Similarity-Guided Graph Neural Network". 2018. [SemanticScholar](https://www.semanticscholar.org/paper/Person-Re-identification-with-Deep-Graph-Neural-Shen-Li/08d2a558ea2deb117dd8066e864612bf2899905b) +[^duke_mtmc_orig]: "Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking". 2016. [SemanticScholar](https://www.semanticscholar.org/paper/Performance-Measures-and-a-Data-Set-for-Tracking-Ristani-Solera/27a2fad58dd8727e280f97036e0d2bc55ef5424c) +[^cn_defense1]: "Tracking by Animation: Unsupervised Learning of Multi-Object Attentive Trackers". 2018. 
        [SemanticScholar](https://www.semanticscholar.org/paper/Tracking-by-Animation%3A-Unsupervised-Learning-of-He-Liu/e90816e1a0e14ea1e7039e0b2782260999aef786) +[^cn_defense2]: "Unsupervised Multi-Object Detection for Video Surveillance Using Memory-Based Recurrent Attention Networks". 2018. [SemanticScholar](https://www.semanticscholar.org/paper/Unsupervised-Multi-Object-Detection-for-Video-Using-He-He/59f357015054bab43fb8cbfd3f3dbf17b1d1f881) +[^iarpa_ibm]: "Horizontal Pyramid Matching for Person Re-identification". 2019. [SemanticScholar](https://www.semanticscholar.org/paper/Horizontal-Pyramid-Matching-for-Person-Fu-Wei/c2a5f27d97744bc1f96d7e1074395749e3c59bc8) +[^us_dhs]: "Re-Identification with Consistent Attentive Siamese Networks". 2018. [SemanticScholar](https://www.semanticscholar.org/paper/Re-Identification-with-Consistent-Attentive-Siamese-Zheng-Karanam/24d6d3adf2176516ef0de2e943ce2084e27c4f94) \ No newline at end of file diff --git a/site/content/pages/datasets/msceleb/index.md b/site/content/pages/datasets/msceleb/index.md index 065ee75d..34f220f5 100644 --- a/site/content/pages/datasets/msceleb/index.md +++ b/site/content/pages/datasets/msceleb/index.md @@ -21,6 +21,10 @@ authors: Adam Harvey [ PAGE UNDER DEVELOPMENT ] +https://www.hrw.org/news/2019/01/15/letter-microsoft-face-surveillance-technology + +https://www.scmp.com/tech/science-research/article/3005733/what-you-need-know-about-sensenets-facial-recognition-firm + {% include 'dashboard.html' %} {% include 'supplementary_header.html' %} diff --git a/site/content/pages/datasets/uccs/index.md b/site/content/pages/datasets/uccs/index.md index 5cd17fa8..f2451c30 100644 --- a/site/content/pages/datasets/uccs/index.md +++ b/site/content/pages/datasets/uccs/index.md @@ -30,6 +30,10 @@ Combined funding sources for the creation of the initial and final release of th In 2017 the UCCS face dataset was used for a defense and intelligence agency funded [face recognition challenge](http://www.face-recognition-challenge.com/) at the International Joint Biometrics Conference in Denver, CO. And in 2018 the dataset was used for the [2nd Unconstrained Face Detection and Open Set Recognition Challenge](https://erodner.github.io/ial2018eccv/) at the European Computer Vision Conference (ECCV) in Munich, Germany. Additional research projects that have used the UCCS dataset are included below in the list of verified citations. +UCCS is part of the IARPA Janus team https://vast.uccs.edu/project/iarpa-janus/ + +https://arxiv.org/abs/1708.02337 + {% include 'dashboard.html' %} {% include 'supplementary_header.html' %} @@ -104,20 +108,15 @@ If you attended University of Colorado Colorado Springs and were captured by the ### Ethics -Please direct any questions about the ethics of the dataset to the University of Colorado Colorado Springs [Ethics and Compliance Office](https://www.uccs.edu/compliance/) -### Technical Details +- Please direct any questions about the ethics of the dataset to the University of Colorado Colorado Springs [Ethics and Compliance Office](https://www.uccs.edu/compliance/) +- For further technical information about the dataset, visit the [UCCS dataset project page](https://vast.uccs.edu/Opensetface). -For further technical information about the dataset, visit the [UCCS dataset project page](https://vast.uccs.edu/Opensetface). 
        
+### Downloads -## Under Development - -- adding more verified locations to map and charts -- add EXIF file to CDN +- Download EXIF data for UCCS photos: [uccs_camera_exif.csv](https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/uccs/assets/uccs_camera_exif.csv) {% include 'cite_our_work.html' %} - ### Footnotes [^funding_sb]: Sapkota, Archana and Boult, Terrance. "Large Scale Unconstrained Open Set Face Database." 2013. diff --git a/site/public/about/assets/LICENSE/index.html b/site/public/about/assets/LICENSE/index.html new file mode 100644 index 00000000..0d3a7878 --- /dev/null +++ b/site/public/about/assets/LICENSE/index.html @@ -0,0 +1,58 @@ + + + + MegaPixels + + + + + + + + + + + +
+ + +
MegaPixels
+ +
+ +
+
+ +

and include this license and attribution protocol within any derivative work.

+

If you publish data derived from MegaPixels, the original dataset creators should first be notified.

+

The MegaPixels dataset is made available under the Open Data Commons Attribution License (https://opendatacommons.org/licenses/by/1.0/) and for academic use only.

+

READABLE SUMMARY OF Open Data Commons Attribution License

+

You are free:

+

To Share: To copy, distribute and use the dataset + To Create: To produce works from the dataset + To Adapt: To modify, transform and build upon the database

+

As long as you:

+

Attribute: You must attribute any public use of the database, or works produced from the database, in the manner specified in the license. For any use or redistribution of the database, or works produced from it, you must make clear to others the license of the database and keep intact any notices on the original database.

+
+ +
+ + + + + \ No newline at end of file diff --git a/site/public/about/attribution/index.html b/site/public/about/attribution/index.html new file mode 100644 index 00000000..0a1b8e0f --- /dev/null +++ b/site/public/about/attribution/index.html @@ -0,0 +1,78 @@ + + + + MegaPixels + + + + + + + + + + + +
+ + +
MegaPixels
+ +
+ +
+
+ +

Legal

+
+ +

ATTRIBUTION PROTOCOL

+

If you use the MegaPixels data or any data derived from it, please cite the original work as follows:

+
+@online{megapixels,
+ author = {Harvey, Adam. LaPlace, Jules.},
+ title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
+ year = 2019,
+ url = {https://megapixels.cc/},
+ urldate = {2019-04-20}
+}
+

and include this license and attribution protocol within any derivative work.

+

If you publish data derived from MegaPixels, the original dataset creators should first be notified.

+

The MegaPixels dataset is made available under the Open Data Commons Attribution License (https://opendatacommons.org/licenses/by/1.0/) and for academic use only.

+

READABLE SUMMARY OF Open Data Commons Attribution License

+

You are free:

+

To Share: To copy, distribute and use the dataset +To Create: To produce works from the dataset +To Adapt: To modify, transform and build upon the database

+
+

As long as you:

+

Attribute: You must attribute any public use of the database, or works produced from the database, in the manner specified in the license. For any use or redistribution of the database, or works produced from it, you must make clear to others the license of the database and keep intact any notices on the original database.

+
+
+ +
+ + + + + \ No newline at end of file diff --git a/site/public/about/index.html b/site/public/about/index.html new file mode 100644 index 00000000..4a4ab3c6 --- /dev/null +++ b/site/public/about/index.html @@ -0,0 +1,107 @@ + + + + MegaPixels + + + + + + + + + + + +
+ + +
MegaPixels
+ +
+ +
+
+ +

About MegaPixels

+
+ +

MegaPixels is an independent art and research project by Adam Harvey and Jules LaPlace that investigates the ethics, origins, and individual privacy implications of face recognition image datasets and their role in the expansion of biometric surveillance technologies.

+

The MegaPixels site is made possible with support from Mozilla

+
+
+

Adam Harvey

+

        is a Berlin-based American artist and researcher. His previous projects (CV Dazzle, Stealth Wear, and SkyLift) explore the potential for counter-surveillance as artwork. He is the founder of VFRAME (visual forensics software for human rights groups) and is currently a researcher in residence at Karlsruhe HfG.
        

+

ahprojects.com

+

+
+
+

Jules LaPlace

+

is an American technologist and artist also based in Berlin. He was previously the CTO of a digital agency in NYC and now also works at VFRAME, developing computer vision and data analysis software for human rights groups. Jules also builds experimental software for artists and musicians. +

+

asdf.us

+
+

        The MegaPixels website is based on an earlier installation from 2017 and ongoing research and lectures (TEDx, CPDP) about facial recognition datasets. Over the last several years this project has evolved into a large-scale interrogation of hundreds of publicly-available face and person analysis datasets.
        

+

        MegaPixels aims to provide a critical perspective on machine learning image datasets, one that might otherwise escape academia and the industry-funded artificial intelligence think tanks that are often supported by the same technology companies who have created many of the datasets presented on this site.
        

+

        MegaPixels is an independent project, designed as a public resource for educators, students, journalists, and researchers. Each dataset presented on this site undergoes a thorough review of its images, intent, and funding sources. Though the goals are similar to publishing a public academic paper, MegaPixels is a website-first research project that aligns closely with the goals of pre-print academic publications. As such, we welcome feedback and ways to improve this site and the clarity of the research.
        

+

        Because this project surfaces many funding issues with datasets (from datasets funded by the C.I.A. to the National University of Defense Technology in China), it is important that we are transparent about our own funding. The original MegaPixels installation in 2017 was built as a commission for and with support from Tactical Technology Collective and Mozilla. The bulk of the research and web development during 2018 was supported by a grant from Mozilla. Continued development in 2019 is partially supported by a 1-year Researcher-in-Residence grant from Karlsruhe HfG, lecture and workshop fees, and from commissions and sales from the Privacy Gift Shop.
        

+

Please get in touch if you are interested in supporting this project.

+
Team
+
    +
  • Adam Harvey: Concept, research and analysis, design, computer vision
  • +
          • Jules LaPlace: Information and systems architecture, data management, web applications
        
  • +
+
Contributing Researchers
+
    +
  • Berit Gilma
  • +
  • Beth (aka Ms. Celeb)
  • +
  • Mathana Stender
  • +
+
Code and Libraries
+
    +
  • Semantic Scholar for citation aggregation
  • +
  • Leaflet.js for maps
  • +
  • C3.js for charts
  • +
  • ThreeJS for 3D visualizations
  • +
  • PDFMiner.Six and Pandas for research paper data analysis
  • +
+

Please direct questions, comments, or feedback to mastodon.social/@adamhrv

+
Attribution
+

If you use MegaPixels or any data derived from it for your work, please cite our original work as follows:

+
+@online{megapixels,
+ author = {Harvey, Adam. LaPlace, Jules.},
+ title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
+ year = 2019,
+ url = {https://megapixels.cc/},
+ urldate = {2019-04-20}
+}
+
+ +
+ + + + + \ No newline at end of file diff --git a/site/public/about/legal/index.html b/site/public/about/legal/index.html new file mode 100644 index 00000000..9eb5dd5a --- /dev/null +++ b/site/public/about/legal/index.html @@ -0,0 +1,108 @@ + + + + MegaPixels + + + + + + + + + + + +
+ + +
MegaPixels
+ +
+ +
+
+ +

Legal

+
+ +

MegaPixels.cc Terms and Privacy

+

MegaPixels is an independent and academic art and research project about the origins and ethics of publicly available face analysis image datasets. By accessing MegaPixels (the Service or Services) you agree to the terms and conditions set forth below.

+

Privacy

+

        The MegaPixels site has been designed to minimize the number of network requests to 3rd party services and therefore prioritize the privacy of the viewer. This site does not use any local or external analytics programs to monitor site viewers. In fact, the only data collected are the necessary server logs used only for preventing misuse, which are deleted at short-term intervals.
        

+

3rd Party Services

+

        In order to provide certain features of the site, some 3rd party services are needed. Currently, the MegaPixels.cc site uses two 3rd party services: (1) Leaflet.js for the interactive map and (2) Digital Ocean Spaces as a content delivery network. Both services encrypt your requests to their server using HTTPS and neither service requires storing any cookies or authentication. However, both services will store files in your web browser's local cache (local storage) to improve loading performance. None of these local storage files are used for analytics, tracking, or any similar purpose.
        

+

Links To Other Web Sites

+

        The MegaPixels.cc site contains many links to 3rd party websites, especially in the list of citations that are provided for each dataset. This website has no control over, and assumes no responsibility for, the content, privacy policies, or practices of any third party web sites or services. You acknowledge and agree that megapixels.cc shall not be responsible or liable, directly or indirectly, for any damage or loss caused or alleged to be caused by or in connection with use of or reliance on any such content, goods or services available on or through any such web sites or services.
        

+

We advise you to read the terms and conditions and privacy policies of any third-party web sites or services that you visit.

+

Information We Collect

+

When you access the Service, we record your visit to the site in a server log file for the purposes of maintaining site security and preventing misuse. This includes your IP address and the header information sent by your web browser which includes the User Agent, referrer, and the requested page on our site.

+

Information We Share

+

        We do not share or make public any information about individual site visitors unless required by law, and server logs are only retained for a limited duration.
        

+

Information We Provide

+

We provide information for educational, journalistic, and research purposes. The published information on MegaPixels is made available under the Open Data Commons Attribution License (https://opendatacommons.org/licenses/by/1.0/) and for academic use only.

+

You are free:

+

To Share: To copy, distribute and use the dataset +To Create: To produce works from the dataset +To Adapt: To modify, transform and build upon the database

+
+

As long as you:

+

Attribute: You must attribute any public use of the database, or works produced from the database, in the manner specified in the license. For any use or redistribution of the database, or works produced from it, you must make clear to others the license of the database and keep intact any notices on the original database.

+
+

If you use the MegaPixels data or any data derived from it, please cite the original work as follows:

+
+@online{megapixels,
+ author = {Harvey, Adam. LaPlace, Jules.},
+ title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
+ year = 2019,
+ url = {https://megapixels.cc/},
+ urldate = {2019-04-20}
+}
+

        While every effort is made to publish only verifiable information, at times information may be edited, removed, or appended for clarity or correction. In no event will the operators of this site be liable for your use or misuse of the information provided.
        

+

We may terminate or suspend access to our Service immediately without prior notice or liability, for any reason whatsoever, including without limitation if you breach the Terms.

+

All provisions of the Terms which by their nature should survive termination shall survive termination, including, without limitation, ownership provisions, warranty disclaimers, indemnity and limitations of liability.

+

Prohibited Uses

+

You may not access or use, or attempt to access or use, the Services to take any action that could harm us or a third party. You may not use the Services in violation of applicable laws or in violation of our or any third party’s intellectual property or other proprietary or legal rights. You further agree that you shall not attempt (or encourage or support anyone else's attempt) to circumvent, reverse engineer, decrypt, or otherwise alter or interfere with the Services, or any content thereof, or make any unauthorized use thereof.

+

Without prior written consent, you shall not:

+

(i) access any part of the Services, Content, data or information you do not have permission or authorization to access;

+

(ii) use robots, spiders, scripts, service, software or any manual or automatic device, tool, or process designed to data mine or scrape the Content, data or information from the Services, or otherwise access or collect the Content, data or information from the Services using automated means;

+

(iii) use services, software or any manual or automatic device, tool, or process designed to circumvent any restriction, condition, or technological measure that controls access to the Services in any way, including overriding any security feature or bypassing or circumventing any access controls or use limits of the Services;

+

        (iv) cache or archive the Content (except for a public search engine’s use of spiders for creating search indices);
        

+

(v) take action that imposes an unreasonable or disproportionately large load on our network or infrastructure; and

+

(vi) do anything that could disable, damage or change the functioning or appearance of the Services, including the presentation of advertising.

+

Engaging in a prohibited use of the Services may result in civil, criminal, and/or administrative penalties, fines, or sanctions against the user and those assisting the user.

+

Governing Law

+

These Terms shall be governed and construed in accordance with the laws of Berlin, Germany, without regard to its conflict of law provisions.

+

Our failure to enforce any right or provision of these Terms will not be considered a waiver of those rights. If any provision of these Terms is held to be invalid or unenforceable by a court, the remaining provisions of these Terms will remain in effect. These Terms constitute the entire agreement between us regarding our Service, and supersede and replace any prior agreements we might have between us regarding the Service.

+

Indemnity

+

        You hereby indemnify, defend and hold harmless MegaPixels (and its creators) and all officers, directors, owners, agents, information providers, affiliates, licensors and licensees (collectively, the "Indemnified Parties") from and against any and all liability and costs, including, without limitation, reasonable attorneys' fees, incurred by the Indemnified Parties in connection with any claim arising out of any breach by you or any user of your account of these Terms of Service or the foregoing representations, warranties and covenants. You shall cooperate as fully as reasonably required in the defense of any such claim. We reserve the right, at our own expense, to assume the exclusive defense and control of any matter subject to indemnification by you.
        

+

Changes

+

We reserve the right, at our sole discretion, to modify or replace these Terms at any time. By continuing to use or access our Service after revisions become effective, you agree to be bound by the revised terms. If you do not agree to revised terms, please do not use the Service.

+
+ +
+ + + + + \ No newline at end of file diff --git a/site/public/about/press/index.html b/site/public/about/press/index.html new file mode 100644 index 00000000..7b0a3e87 --- /dev/null +++ b/site/public/about/press/index.html @@ -0,0 +1,59 @@ + + + + MegaPixels + + + + + + + + + + + +
+ + +
MegaPixels
+ +
+ +
+
+ +

Press

+
+ +
+
+ +
+ + + + + \ No newline at end of file diff --git a/site/public/datasets/50_people_one_question/index.html b/site/public/datasets/50_people_one_question/index.html new file mode 100644 index 00000000..dfd8cbff --- /dev/null +++ b/site/public/datasets/50_people_one_question/index.html @@ -0,0 +1,114 @@ + + + + MegaPixels + + + + + + + + + + + +
+ + +
MegaPixels
+
50 People One Question Dataset
+
+ +
+
+ +
        People One Question is a dataset of people from an online video series on YouTube and Vimeo used for building facial recognition algorithms
        
People One Question dataset includes ... +

50 People 1 Question

+

[ page under development ]

+
+

Who used 50 People One Question Dataset?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

        + To help understand how 50 People One Question Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing 50 People One Question was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +
        

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+
+ +
+ + + + + \ No newline at end of file diff --git a/site/public/datasets/afad/index.html b/site/public/datasets/afad/index.html new file mode 100644 index 00000000..df14e7cd --- /dev/null +++ b/site/public/datasets/afad/index.html @@ -0,0 +1,127 @@ + + + + MegaPixels + + + + + + + + + + + +
+ + +
MegaPixels
+
Asian Face Age Dataset
+
+ +
+
+ +

Asian Face Age Dataset

+

[ page under development ]

+
+

Who used Asian Face Age Dataset?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

        + To help understand how Asian Face Age Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing The Asian Face Age Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +
        

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+

(ignore) research notes

+

The Asian Face Age Dataset (AFAD) is a new dataset proposed for evaluating the performance of age estimation, which contains more than 160K facial images and the corresponding age and gender labels. This dataset is oriented to age estimation on Asian faces, so all the facial images are for Asian faces. It is noted that the AFAD is the biggest dataset for age estimation to date. It is well suited to evaluate how deep learning methods can be adopted for age estimation. +Motivation

+

For age estimation, there are several public datasets for evaluating the performance of a specific algorithm, such as FG-NET [1] (1002 face images), MORPH I (1690 face images), and MORPH II[2] (55,608 face images). Among them, the MORPH II is the biggest public dataset to date. On the other hand, as we know it is necessary to collect a large scale dataset to train a deep Convolutional Neural Network. Therefore, the MORPH II dataset is extensively used to evaluate how deep learning methods can be adopted for age estimation [3][4].

+

However, the ethnic is very unbalanced for the MORPH II dataset, i.e., it has only less than 1% Asian faces. In order to evaluate the previous methods for age estimation on Asian Faces, the Asian Face Age Dataset (AFAD) was proposed.

+

There are 164,432 well-labeled photos in the AFAD dataset. It consist of 63,680 photos for female as well as 100,752 photos for male, and the ages range from 15 to 40. The distribution of photo counts for distinct ages are illustrated in the figure above. Some samples are shown in the Figure on the top. Its download link is provided in the "Download" section.

+

In addition, we also provide a subset of the AFAD dataset, called AFAD-Lite, which only contains PLACEHOLDER well-labeled photos. It consist of PLACEHOLDER photos for female as well as PLACEHOLDER photos for male, and the ages range from 15 to 40. The distribution of photo counts for distinct ages are illustrated in Fig. PLACEHOLDER. Its download link is also provided in the "Download" section.

+

The AFAD dataset is built by collecting selfie photos on a particular social network -- RenRen Social Network (RSN) [5]. The RSN is widely used by Asian students including middle school, high school, undergraduate, and graduate students. Even after leaving from school, some people still access their RSN account to connect with their old classmates. So, the age of the RSN user crosses a wide range from 15-years to more than 40-years old.

+

Please notice that this dataset is made available for academic research purpose only.

+
+

https://afad-dataset.github.io/

+
+ +
+ + + + + \ No newline at end of file diff --git a/site/public/datasets/brainwash/index.html b/site/public/datasets/brainwash/index.html new file mode 100644 index 00000000..03331a2d --- /dev/null +++ b/site/public/datasets/brainwash/index.html @@ -0,0 +1,163 @@ + + + + MegaPixels + + + + + + + + + + + +
+ + +
MegaPixels
+
Brainwash Dataset
+
+ +
+
+ +
Brainwash is a dataset of webcam images taken from the Brainwash Cafe in San Francisco in 2014
The Brainwash dataset includes 11,918 images of "everyday life of a busy downtown cafe" and is used for training head detection surveillance algorithms +

Brainwash Dataset

+

        Brainwash is a head detection dataset created from San Francisco's Brainwash Cafe livecam footage. It includes 11,918 images of "everyday life of a busy downtown cafe" 1 captured at 100-second intervals throughout the entire day. The Brainwash dataset was captured during 3 days in 2014: October 27, November 13, and November 24. According to the authors' research paper introducing the dataset, the images were acquired with the help of Angelcam.com. 2
        

+

        Brainwash is not a widely used dataset, but since its publication by Stanford University in 2015, it has notably appeared in several research papers from the National University of Defense Technology in Changsha, China. In 2016 and in 2017, researchers there conducted studies on detecting people's heads in crowded scenes for the purpose of surveillance. 3 4
        

+

        If you happen to have been at Brainwash cafe in San Francisco at any time on October 27, November 13, or November 24, 2014, you are most likely included in the Brainwash dataset and have unwittingly contributed to surveillance research.
        

+
+

Who used Brainwash Dataset?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

        + To help understand how Brainwash Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Brainwash Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +
        

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+
+ +
+
+
+
+ +

Supplementary Information

+ +
 A visualization of 81,973 head annotations from the Brainwash dataset training partition. © megapixels.cc
A visualization of 81,973 head annotations from the Brainwash dataset training partition. © megapixels.cc
         A sample image from the Brainwash dataset used for training face and head detection algorithms for surveillance. The dataset contains about 12,000 images. License: Open Data Commons Public Domain Dedication (PDDL)
        A sample image from the Brainwash dataset used for training face and head detection algorithms for surveillance. The dataset contains about 12,000 images. License: Open Data Commons Public Domain Dedication (PDDL)
        
 49 of the 11,918 images included in the Brainwash dataset. License: Open Data Commons Public Domain Dedication (PDDL)
49 of the 11,918 images included in the Brainwash dataset. License: Open Data Commons Public Domain Dedication (PDDL)

TODO

+
    +
  • change supp images to 2x2 grid with bboxes
  • +
  • add bounding boxes to the header image
  • +
  • remake montage with randomized images, with bboxes
  • +
  • add ethics link to Stanford
  • +
  • add optout info
  • +
+
+ +

Cite Our Work

+

+ + If you use our data, research, or graphics please cite our work: + +

+@online{megapixels,
+  author = {Harvey, Adam. LaPlace, Jules.},
+  title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
+  year = 2019,
+  url = {https://megapixels.cc/},
+  urldate = {2019-04-20}
+}
+ +

+

References

  • a

    "readme.txt" https://exhibits.stanford.edu/data/catalog/sx925dc9385.

    +
  • a

    Stewart, Russel. Andriluka, Mykhaylo. "End-to-end people detection in crowded scenes". 2016.

    +
  • a

    Li, Y. and Dou, Y. and Liu, X. and Li, T. Localized Region Context and Object Feature Fusion for People Head Detection. ICIP16 Proceedings. 2016. Pages 594-598.

    +
  • a

    Zhao. X, Wang Y, Dou, Y. A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering.

    +
+ +
+ + + + + \ No newline at end of file diff --git a/site/public/datasets/caltech_10k/index.html b/site/public/datasets/caltech_10k/index.html new file mode 100644 index 00000000..00b5e7fd --- /dev/null +++ b/site/public/datasets/caltech_10k/index.html @@ -0,0 +1,124 @@ + + + + MegaPixels + + + + + + + + + + + +
+ + +
MegaPixels
+
        Caltech 10K Faces Dataset</div>
        
+
+ +
+
+ +

Caltech 10K Faces Dataset

+

[ page under development ]

+
+

Who used Brainwash Dataset?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

        + To help understand how Caltech 10K Faces Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Caltech 10K Faces Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +
        

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+

(ignore) research notes

+

The dataset contains images of people collected from the web by typing common given names into Google Image Search. The coordinates of the eyes, the nose and the center of the mouth for each frontal face are provided in a ground truth file. This information can be used to align and crop the human faces or as a ground truth for a face detection algorithm. The dataset has 10,524 human faces of various resolutions and in different settings, e.g. portrait images, groups of people, etc. Profile faces or very low resolution faces are not labeled.

+
+ +
+ + + + + \ No newline at end of file diff --git a/site/public/datasets/celeba/index.html b/site/public/datasets/celeba/index.html new file mode 100644 index 00000000..c4caef20 --- /dev/null +++ b/site/public/datasets/celeba/index.html @@ -0,0 +1,126 @@ + + + + MegaPixels + + + + + + + + + + + +
+ + +
MegaPixels
+
CelebA Dataset
+
+ +
+
+ +
CelebA is a dataset of people...
CelebA includes... +

CelebA Dataset

+

[ PAGE UNDER DEVELOPMENT ]

+
+

Who used CelebA Dataset?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

        + To help understand how CelebA Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Large-scale CelebFaces Attributes Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +
        

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+

Research

+
    +
  • "An Unsupervised Approach to Solving Inverse Problems using Generative Adversarial Networks" mentions use by sponsored by an agency of the United States government. Neither the United States government nor Lawrence Livermore National Security, LLC, nor any of their"
  • +
  • 7dab6fbf42f82f0f5730fc902f72c3fb628ef2f0
  • +
  • principal responsibility is ensuring the safety, security and reliability of the nation's nuclear weapons NNSA ( National Nuclear Security Administration )
  • +
+
+ +
+ + + + + \ No newline at end of file diff --git a/site/public/datasets/cofw/index.html b/site/public/datasets/cofw/index.html new file mode 100644 index 00000000..4851e256 --- /dev/null +++ b/site/public/datasets/cofw/index.html @@ -0,0 +1,179 @@ + + + + MegaPixels + + + + + + + + + + + +
+ + +
MegaPixels
+
COFW Dataset
+
+ +
+
+ +

Caltech Occluded Faces in the Wild

+

[ PAGE UNDER DEVELOPMENT ]

+
+

Who used COFW Dataset?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

        + To help understand how COFW Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Caltech Occluded Faces in the Wild was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +
        

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+

(ignore) research notes

+
Years
1993-1996
Images
14,126
Identities
1,199
Origin
Web Searches
Funded by
ODNI, IARPA, Microsoft

COFW is "is designed to benchmark face landmark algorithms in realistic conditions, which include heavy occlusions and large shape variations" [Robust face landmark estimation under occlusion].

+

We asked four people with different levels of computer vision knowledge to each collect 250 faces representative of typical real-world images, with the clear goal of challenging computer vision methods. +The result is 1,007 images of faces obtained from a variety of sources.

+
+

Robust face landmark estimation under occlusion

+

Our face dataset is designed to present faces in real-world conditions. Faces show large variations in shape and occlusions due to differences in pose, expression, use of accessories such as sunglasses and hats and interactions with objects (e.g. food, hands, microphones, etc.). All images were hand annotated in our lab using the same 29 landmarks as in LFPW. We annotated both the landmark positions as well as their occluded/unoccluded state. The faces are occluded to different degrees, with large variations in the type of occlusions encountered. COFW has an average occlusion of over 23%. +To increase the number of training images, and since COFW has the exact same landmarks as LFPW, for training we use the original non-augmented 845 LFPW faces + 500 COFW faces (1345 total), and for testing the remaining 507 COFW faces. To make sure all images had occlusion labels, we annotated occlusion on the available 845 LFPW training images, finding an average of only 2% occlusion.

+
+

http://www.vision.caltech.edu/xpburgos/ICCV13/

+

This research is supported by NSF Grant 0954083 and by the Office of the Director of National Intelligence (ODNI), Intelligence Advanced Research Projects Activity (IARPA), via IARPA R&D Contract No. 2014-14071600012.

+
+

https://www.cs.cmu.edu/~peiyunh/topdown/

+
+ +

Biometric Trade Routes

+ +

        + To help understand how COFW Dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Caltech Occluded Faces in the Wild was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the location markers to reveal research projects at that location. +
        

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org and then dataset usage verified and geolocated.
+
+ +
+
+
+
+ +

Supplementary Information

+ +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+
+

Who used COFW Dataset?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+

TODO

+

- replace graphic

+
+ +
+ + + + + \ No newline at end of file diff --git a/site/public/datasets/duke_mtmc/index.html b/site/public/datasets/duke_mtmc/index.html index 9bec47ed..ba32484a 100644 --- a/site/public/datasets/duke_mtmc/index.html +++ b/site/public/datasets/duke_mtmc/index.html @@ -27,7 +27,7 @@
Duke MTMC is a dataset of surveillance camera footage of students on Duke University campus
Duke MTMC contains over 2 million video frames and 2,700 unique identities collected from 8 HD cameras at Duke University campus in March 2014 -

Duke MTMC

-

The Duke Multi-Target, Multi-Camera Tracking Dataset (MTMC) is a dataset of video recorded on Duke University campus for research and development of networked camera surveillance systems. MTMC tracking is used for citywide dragnet surveillance systems such as those used throughout China by SenseTime 1 and the oppressive monitoring of 2.5 million Uyghurs in Xinjiang by SenseNets 2. In fact researchers from both SenseTime 4 5 and SenseNets 3 used the Duke MTMC dataset for their research.

-

The Duke MTMC dataset is unique because it is the largest publicly available MTMC and person re-identification dataset and has the longest duration of annotated video. In total, the Duke MTMC dataset provides over 14 hours of 1080p video from 8 synchronized surveillance cameras. 6 It is among the most widely used person re-identification datasets in the world. The approximately 2,700 unique people in the Duke MTMC videos, most of whom are students, are used for research and development of surveillance technologies by commercial, academic, and even defense organizations.

-
 A collection of 1,600 out of the 2,700 students and passersby captured into the Duke MTMC surveillance research dataset. These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification. © megapixels.cc
A collection of 1,600 out of the 2,700 students and passersby captured into the Duke MTMC surveillance research dataset. These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification. © megapixels.cc

The creation and publication of the Duke MTMC dataset in 2016 was originally funded by the U.S. Army Research Laboratory and the National Science Foundation 6. Since 2016 use of the Duke MTMC dataset images have been publicly acknowledged in research funded by or on behalf of the Chinese National University of Defense 7 8, IARPA and IBM 9, and U.S. Department of Homeland Security 10.

-

The 8 cameras deployed on Duke's campus were specifically setup to capture students "during periods between lectures, when pedestrian traffic is heavy". 6 Camera 7 and 2 capture large groups of prospective students and children. Camera 5 was positioned to capture students as they enter and exit Duke University's main chapel. Each camera's location is documented below.

-
 Duke MTMC camera locations on Duke University campus © megapixels.cc
Duke MTMC camera locations on Duke University campus © megapixels.cc
 Duke MTMC camera views for 8 cameras deployed on campus © megapixels.cc
Duke MTMC camera views for 8 cameras deployed on campus © megapixels.cc
 Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus © megapixels.cc
Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus © megapixels.cc
+

        Duke MTMC (Multi-Target, Multi-Camera Tracking) is a dataset of video recorded on Duke University campus for research and development of networked camera surveillance systems. MTMC tracking algorithms are used for citywide dragnet surveillance systems such as those used throughout China by SenseTime 1 and the oppressive monitoring of 2.5 million Uyghurs in Xinjiang by SenseNets 2. In fact, researchers from both SenseTime 4 5 and SenseNets 3 used the Duke MTMC dataset for their research.
        

+

        In this investigation into the Duke MTMC dataset, we found that researchers at Duke University in Durham, North Carolina captured over 2,000 students, faculty members, and passersby into one of the most prolific public surveillance research datasets that is used around the world by commercial and defense surveillance organizations.
        

+

        Since its publication in 2016, the Duke MTMC dataset has been used in over 100 studies at organizations around the world including SenseTime 4 5, SenseNets 3, IARPA and IBM 9, Chinese National University of Defense 7 8, US Department of Homeland Security 10, Tencent, Microsoft, Microsoft Asia, Fraunhofer, Senstar Corp., Alibaba, Naver Labs, Google and Hewlett-Packard Labs to name only a few.
        

+

        The creation of the Duke MTMC dataset in 2014 and its publication in 2016 were originally funded by the U.S. Army Research Laboratory and the National Science Foundation 6. However, our analysis of the geographic locations of the publicly available research shows more than twice as many citations from researchers in China as from the United States (44% versus 20%). In 2018 alone, there were 70 research project citations from China.
        

+
         A collection of 1,600 out of the 2,700 students and passersby captured into the Duke MTMC surveillance research and development dataset. These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification. Open Data Commons Attribution License.
        A collection of 1,600 out of the 2,700 students and passersby captured into the Duke MTMC surveillance research and development dataset. These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification. Open Data Commons Attribution License.
        

The 8 cameras deployed on Duke's campus were specifically setup to capture students "during periods between lectures, when pedestrian traffic is heavy". 6. Camera 5 was positioned to capture students as entering and exiting the university's main chapel. Each camera's location and approximate field of view. The heat map visualization shows the locations where pedestrians were most frequently annotated in each video from the Duke MTMC datset.

+
 Duke MTMC camera locations on Duke University campus. Open Data Commons Attribution License.
Duke MTMC camera locations on Duke University campus. Open Data Commons Attribution License.
 Duke MTMC camera views for 8 cameras deployed on campus © megapixels.cc
Duke MTMC camera views for 8 cameras deployed on campus © megapixels.cc
 Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus © megapixels.cc
Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus © megapixels.cc

Who used Duke MTMC Dataset?

@@ -110,18 +111,122 @@

Supplementary Information

-

Notes

-

The Duke MTMC dataset paper mentions 2,700 identities, but their ground truth file only lists annotations for 1,812

-

References

Funding

+

Original funding for the Duke MTMC dataset was provided by the Army Research Office under Grant No. W911NF-10-1-0387 and by the National Science Foundation +under Grants IIS-10-17017 and IIS-14-20894.

+

Video Timestamps

+

        The video timestamps contain the likely, but not yet confirmed, dates and times of capture. Because the video timestamps align with the start and stop time sync data provided by the researchers, the relative timing can at least be confirmed. The rainy weather on that day also contributes to the likelihood that the footage was captured on March 14, 2014.
        

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
CameraDateStartEnd
Camera 1March 14, 20144:14PM5:43PM
Camera 2March 14, 20144:13PM4:43PM
Camera 3March 14, 20144:20PM5:48PM
Camera 4March 14, 20144:21PM5:54PM
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
CameraDateStartEnd
Camera 5March 14, 20144:12PM5:43PM
Camera 6March 14, 20144:18PM5:43PM
Camera 7March 14, 20144:16PM5:40PM
Camera 8March 14, 20144:25PM5:42PM
+

Opting Out

+

        If you attended Duke University and were captured by any of the 8 surveillance cameras positioned on campus in 2014, there is unfortunately no way to be removed. The dataset files have been distributed throughout the world and it would not be possible to contact all the owners for removal. Nor do the authors provide any options for students to opt out, nor did they even inform students that they would be used as test subjects for surveillance research and development in a project funded, in part, by the United States Army Research Office.
        

+

Notes

+
    +
  • The Duke MTMC dataset paper mentions 2,700 identities, but their ground truth file only lists annotations for 1,812
  • +
+
+ +

Cite Our Work

+

+ + If you use our data, research, or graphics please cite our work: + +

+@online{megapixels,
+  author = {Harvey, Adam. LaPlace, Jules.},
+  title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
+  year = 2019,
+  url = {https://megapixels.cc/},
+  urldate = {2019-04-20}
+}
+ +

+

        If you use any data from the Duke MTMC dataset, please follow their license and cite their work as:
        

+
+@inproceedings{ristani2016MTMC,
+ title =        {Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking},
+ author =       {Ristani, Ergys and Solera, Francesco and Zou, Roger and Cucchiara, Rita and Tomasi, Carlo},
+ booktitle =    {European Conference on Computer Vision workshop on Benchmarking Multi-Target Tracking},
+ year =         {2016}
+}
+

References

diff --git a/site/public/datasets/feret/index.html b/site/public/datasets/feret/index.html new file mode 100644 index 00000000..089cd351 --- /dev/null +++ b/site/public/datasets/feret/index.html @@ -0,0 +1,87 @@ + + + + MegaPixels + + + + + + + + + + + +
+ + +
MegaPixels
+
        FERET</div>
        
+
+ +
+
+ +

Funding

+

        The FERET program is sponsored by the U.S. Department of Defense’s Counterdrug Technology Development Program Office. The U.S. Army Research Laboratory (ARL) is the technical agent for the FERET program. ARL designed, administered, and scored the FERET tests. George Mason University collected, processed, and maintained the FERET database. Inquiries regarding the FERET database or test should be directed to P. Jonathon Phillips.
        

+
+ +
+ + + + + \ No newline at end of file diff --git a/site/public/datasets/hrt_transgender/index.html b/site/public/datasets/hrt_transgender/index.html index 486b9122..231a5271 100644 --- a/site/public/datasets/hrt_transgender/index.html +++ b/site/public/datasets/hrt_transgender/index.html @@ -27,7 +27,7 @@
TBD
TBD -

[ PAGE UNDER DEVELOPMENT ]

Who used PIPA Dataset?

diff --git a/site/public/datasets/pubfig/index.html b/site/public/datasets/pubfig/index.html index 2c8bd7b1..ed593054 100644 --- a/site/public/datasets/pubfig/index.html +++ b/site/public/datasets/pubfig/index.html @@ -27,7 +27,8 @@
PubFig is a dataset...
[ add subdescrition ] -

PubFig

+

PubFig

-

[ PAGE UNDER DEVELOPMENT ]

+

[ PAGE UNDER DEVELOPMENT ]

Who used PubFig?

diff --git a/site/public/datasets/uccs/index.html b/site/public/datasets/uccs/index.html index 1d76de3a..27d30716 100644 --- a/site/public/datasets/uccs/index.html +++ b/site/public/datasets/uccs/index.html @@ -28,7 +28,7 @@
UnConstrained College Students is a dataset of long-range surveillance photos of students on University of Colorado in Colorado Springs campus
The UnConstrained College Students dataset includes 16,149 images of 1,732 students, faculty, and pedestrians and is used for developing face recognition and face detection algorithms

UnConstrained College Students

-

UnConstrained College Students (UCCS) is a dataset of long-range surveillance photos captured at University of Colorado Colorado Springs. According to the authors of two papers associated with the dataset, over 1,700 students and pedestrians were "photographed using a long-range high-resolution surveillance camera without their knowledge" 2. In this investigation, we examine the funding sources, contents of the dataset, photo EXIF data, and publicy available research project citations.

+

        UnConstrained College Students (UCCS) is a dataset of long-range surveillance photos captured at University of Colorado Colorado Springs. According to the authors of two papers associated with the dataset, over 1,700 students and pedestrians were "photographed using a long-range high-resolution surveillance camera without their knowledge" 2. In this investigation, we examine the funding sources, contents of the dataset, photo EXIF data, and publicly available research project citations.
        

According to the authors of the UnConstrained College Students dataset, it is primarily used for research and development of "face detection and recognition research towards surveillance applications that are becoming more popular and more required nowadays, and where no automatic recognition algorithm has proven to be useful yet." Applications of this technology include usage by defense and intelligence agencies, who were also the primary funding sources of the UCCS dataset.

In the two papers associated with the release of the UCCS dataset (Unconstrained Face Detection and Open-Set Face Recognition Challenge and Large Scale Unconstrained Open Set Face Database), the researchers disclosed their funding sources as ODNI (United States Office of the Director of National Intelligence), IARPA (Intelligence Advanced Research Projects Activity), ONR MURI (Office of Naval Research and The Department of Defense Multidisciplinary University Research Initiative), Army SBIR (Small Business Innovation Research), SOCOM SBIR (Special Operations Command and Small Business Innovation Research), and the National Science Foundation. Further, UCCS's VAST site explicitly states they are part of IARPA Janus, a face recognition project developed to serve the needs of national intelligence interests.

 Location on campus where students were unknowingly photographed with a telephoto lens to be used for defense and intelligence agency funded research on face recognition. Image: Google Maps
Location on campus where students were unknowingly photographed with a telephoto lens to be used for defense and intelligence agency funded research on face recognition. Image: Google Maps

The UCCS dataset includes the highest resolution images of any publicly available face recognition dataset discovered so far (18MP) and was, as of 2018, the "largest surveillance FR benchmark in the public domain." 3 To create the dataset, the researchers used a Canon 7D digital camera fitted with a Sigma 800mm telephoto lens and photographed students from a distance of 150–200m through their office window. Photos were taken during the morning and afternoon while students were walking to and from classes. According to an analysis of the EXIF data embedded in the photos, nearly half of the 16,149 photos were taken on Tuesdays. The most popular time was during lunch break. All of the photos were taken during the spring semesters of 2012 and 2013, but the dataset was not publicly released until 2016.
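
The weekday tally described above can be reproduced from the photos' embedded metadata. A minimal sketch assuming the UCCS images are JPEGs with intact EXIF DateTimeOriginal tags, using Pillow; the uccs_images directory is a placeholder, not the dataset's actual layout:

# Sketch: tally capture weekdays from EXIF data in a folder of JPEGs.
from collections import Counter
from datetime import datetime
from pathlib import Path
from PIL import Image, ExifTags

# Map tag names to Pillow's numeric EXIF keys once.
TAG_IDS = {name: tag_id for tag_id, name in ExifTags.TAGS.items()}

def capture_weekday(path):
    # _getexif() merges the Exif sub-IFD, where DateTimeOriginal lives.
    exif = Image.open(path)._getexif() or {}
    raw = exif.get(TAG_IDS["DateTimeOriginal"])
    if not raw:
        return None
    # EXIF datetimes look like "2013:04:09 12:31:05"
    return datetime.strptime(raw, "%Y:%m:%d %H:%M:%S").strftime("%A")

counts = Counter()
for p in Path("uccs_images").rglob("*.jpg"):  # hypothetical path
    day = capture_weekday(p)
    if day:
        counts[day] += 1
for day, n in counts.most_common():
    print(day, n)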

diff --git a/site/public/datasets/vgg_face2/index.html b/site/public/datasets/vgg_face2/index.html index 75d73824..3c2859a5 100644 --- a/site/public/datasets/vgg_face2/index.html +++ b/site/public/datasets/vgg_face2/index.html @@ -26,7 +26,8 @@
-

[ page under development ]

Who used Brainwash Dataset?

diff --git a/site/public/datasets/viper/index.html b/site/public/datasets/viper/index.html index 5b3ac35b..494c249b 100644 --- a/site/public/datasets/viper/index.html +++ b/site/public/datasets/viper/index.html @@ -27,7 +27,8 @@
VIPeR is a person re-identification dataset of images captured at UC Santa Cruz in 2007
VIPeR contains 1,264 images of 632 persons captured on the UC Santa Cruz campus and is used to train person re-identification algorithms for surveillance -

VIPeR Dataset

+

VIPeR Dataset

-

[ page under development ]

+

[ page under development ]

VIPeR (Viewpoint Invariant Pedestrian Recognition) is a dataset of pedestrian images captured at the University of California Santa Cruz in 2007. According to the researchers 2, "cameras were placed in different locations in an academic setting and subjects were notified of the presence of cameras, but were not coached or instructed in any way."

VIPeR is amongst the most widely used publicly available person re-identification datasets. In 2017 the VIPeR dataset was combined into a larger person re-identification dataset created by the Chinese University of Hong Kong called PETA (PEdesTrian Attribute).
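
For context on how person re-identification datasets such as VIPeR are scored: algorithms are usually compared with a Cumulative Matching Characteristic (CMC) curve. A minimal numpy sketch of rank-k accuracy under the simplest single-shot protocol; the random features and Euclidean distance are placeholders, not any benchmark's official evaluation code:

# Sketch: rank-k CMC accuracy for person re-identification.
# query_feats / gallery_feats: (N, D) arrays where query i and
# gallery i depict the same person (a VIPeR-style pairing).
import numpy as np

def cmc(query_feats, gallery_feats, max_rank=20):
    # Euclidean distance between every query and every gallery entry.
    dists = np.linalg.norm(
        query_feats[:, None, :] - gallery_feats[None, :, :], axis=2
    )
    order = np.argsort(dists, axis=1)          # gallery sorted per query
    true_ids = np.arange(len(query_feats))
    rank = np.argmax(order == true_ids[:, None], axis=1)
    # cmc[k-1] = fraction of queries whose true match is in the top k.
    return np.array([(rank < k).mean() for k in range(1, max_rank + 1)])

rng = np.random.default_rng(0)
q, g = rng.normal(size=(100, 64)), rng.normal(size=(100, 64))
print("rank-1 accuracy:", cmc(q, g)[0])  # ~0.01 for random features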

diff --git a/site/public/datasets/youtube_celebrities/index.html b/site/public/datasets/youtube_celebrities/index.html index 39670c19..9a6ae18e 100644 --- a/site/public/datasets/youtube_celebrities/index.html +++ b/site/public/datasets/youtube_celebrities/index.html @@ -26,8 +26,8 @@
-

YouTube Celebrities

-

[ page under development ]

+

YouTube Celebrities

+

[ page under development ]

Who used YouTube Celebrities?

diff --git a/todo.md b/todo.md index cc4736cd..4586611e 100644 --- a/todo.md +++ b/todo.md @@ -16,7 +16,6 @@ ## Datasets - JL: this paper isn't appearing in the UCCS list of verified papers but should be included https://arxiv.org/pdf/1708.02337.pdf -- JL: add h2 dataset title above the right-sidebar so title extends full width - AH: add dataset analysis for MS Celeb, IJB-C - AH: fix dataset analysis for UCCS, brainwahs graphics - AH: add license information to each dataset page -- cgit v1.2.3-70-g09d2 From 9d6d12f0b16d10219c62f25ce036b9377417de70 Mon Sep 17 00:00:00 2001 From: Jules Laplace Date: Tue, 16 Apr 2019 18:55:02 +0200 Subject: build --- megapixels/app/site/parser.py | 3 +- .../datasets/50_people_one_question/index.html | 56 +++++++++- site/public/datasets/afad/index.html | 56 +++++++++- site/public/datasets/brainwash/index.html | 86 +++++++++++++-- site/public/datasets/caltech_10k/index.html | 58 +++++++++- site/public/datasets/celeba/index.html | 58 +++++++++- site/public/datasets/cofw/index.html | 117 +++++++++++++++++++-- site/public/datasets/duke_mtmc/index.html | 99 ++++++++++++++--- site/public/datasets/feret/index.html | 58 +++++++++- site/public/datasets/hrt_transgender/index.html | 6 +- site/public/datasets/lfpw/index.html | 56 +++++++++- site/public/datasets/lfw/index.html | 77 ++++++++++++-- site/public/datasets/market_1501/index.html | 56 +++++++++- site/public/datasets/msceleb/index.html | 68 +++++++++++- site/public/datasets/oxford_town_centre/index.html | 89 ++++++++++++++-- site/public/datasets/pipa/index.html | 56 +++++++++- site/public/datasets/pubfig/index.html | 56 +++++++++- site/public/datasets/uccs/index.html | 103 +++++++++++++++--- site/public/datasets/vgg_face2/index.html | 58 +++++++++- site/public/datasets/viper/index.html | 56 +++++++++- .../public/datasets/youtube_celebrities/index.html | 56 +++++++++- 21 files changed, 1212 insertions(+), 116 deletions(-) (limited to 'site/public/datasets/feret/index.html') diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py index aa2ddcda..1489d056 100644 --- a/megapixels/app/site/parser.py +++ b/megapixels/app/site/parser.py @@ -60,8 +60,7 @@ def parse_markdown(metadata, sections, s3_path, skip_h1=False): current_group.append(section) in_stats = True if 'end sidebar' in section.lower(): - groups.append(format_section(current_group, s3_path, 'right-sidebar', tag='div')) - current_group = [] + current_group = [format_section(current_group, s3_path, 'right-sidebar', tag='div')] in_stats = False elif in_stats and not section.strip().startswith('## ') and 'end sidebar' not in section.lower(): current_group.append(section) diff --git a/site/public/datasets/50_people_one_question/index.html b/site/public/datasets/50_people_one_question/index.html index 577d4d8c..dc7919f7 100644 --- a/site/public/datasets/50_people_one_question/index.html +++ b/site/public/datasets/50_people_one_question/index.html @@ -28,7 +28,7 @@
People One Question is a dataset of people from an online video series on YouTube and Vimeo used for building facial recognition algorithms
People One Question dataset includes ...

50 People 1 Question

-
+

[ page under development ]

+
+

Who used 50 People One Question Dataset?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

+ To help understand how 50 People One Question Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing 50 People One Question was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +
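
The pipeline summarized above can be approximated with public tools. A rough sketch using the Semantic Scholar API and the geopy geocoder; the paper ID and institution string are placeholders, and the site's actual verification was manual:

# Sketch: list papers citing a dataset paper, then geocode an institution.
import requests
from geopy.geocoders import Nominatim

PAPER_ID = "arXiv:1708.02337"  # placeholder Semantic Scholar paper ID

resp = requests.get(f"https://api.semanticscholar.org/v1/paper/{PAPER_ID}")
resp.raise_for_status()
citations = resp.json().get("citations", [])
print(len(citations), "citing papers found")

geolocator = Nominatim(user_agent="dataset-citation-mapper")
loc = geolocator.geocode("University of Massachusetts Amherst")  # example
if loc:
    print(round(loc.latitude, 4), round(loc.longitude, 4))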

+ +
diff --git a/site/public/datasets/afad/index.html b/site/public/datasets/afad/index.html index 6ef13948..f2b0a5ba 100644 --- a/site/public/datasets/afad/index.html +++ b/site/public/datasets/afad/index.html @@ -42,9 +42,59 @@
Website
github.io
-

[ page under development ]

-

{% include 'dashboard.html' %}

-

(ignore) research notes

+

[ page under development ]

+
+

Who used Asian Face Age Dataset?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

+ To help understand how Asian Face Age Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing The Asian Face Age Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +

+ +
+ +
+
+
+ +
+ +
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+

(ignore) research notes

The Asian Face Age Dataset (AFAD) is a new dataset proposed for evaluating the performance of age estimation, which contains more than 160K facial images and the corresponding age and gender labels. This dataset is oriented to age estimation on Asian faces, so all the facial images are for Asian faces. It is noted that the AFAD is the biggest dataset for age estimation to date. It is well suited to evaluate how deep learning methods can be adopted for age estimation.

Motivation

For age estimation, there are several public datasets for evaluating the performance of a specific algorithm, such as FG-NET [1] (1002 face images), MORPH I (1690 face images), and MORPH II[2] (55,608 face images). Among them, the MORPH II is the biggest public dataset to date. On the other hand, as we know it is necessary to collect a large scale dataset to train a deep Convolutional Neural Network. Therefore, the MORPH II dataset is extensively used to evaluate how deep learning methods can be adopted for age estimation [3][4].

diff --git a/site/public/datasets/brainwash/index.html b/site/public/datasets/brainwash/index.html index 2a6044d0..95f0d77d 100644 --- a/site/public/datasets/brainwash/index.html +++ b/site/public/datasets/brainwash/index.html @@ -28,7 +28,7 @@
Brainwash is a dataset of webcam images taken from the Brainwash Cafe in San Francisco in 2014
The Brainwash dataset includes 11,918 images of "everyday life of a busy downtown cafe" and is used for training head detection surveillance algorithms

Brainwash Dataset

-
+

Brainwash is a head detection dataset created from San Francisco's Brainwash Cafe livecam footage. It includes 11,918 images of "everyday life of a busy downtown cafe" 1 captured at 100-second intervals throughout the entire day. The Brainwash dataset was captured during 3 days in 2014: October 27, November 13, and November 24. According to the authors' research paper introducing the dataset, the images were acquired with the help of Angelcam.com. 2
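
The head annotations ship as plain-text .idl files alongside the images. A minimal parser sketch; the quoted-path-plus-box-list line format and the brainwash_train.idl filename are assumptions based on the Stanford release, not verified against every file:

# Sketch: count head boxes in a Brainwash-style .idl annotation file.
# Assumed line format: "path/to/frame.png": (x1, y1, x2, y2), (x1, ...);
import re

BOX_RE = re.compile(r"\(([\d.]+),\s*([\d.]+),\s*([\d.]+),\s*([\d.]+)\)")

def parse_idl(path):
    annotations = {}
    with open(path) as f:
        for line in f:
            if ":" not in line:
                continue
            image, boxes = line.split(":", 1)
            coords = [tuple(map(float, m)) for m in BOX_RE.findall(boxes)]
            annotations[image.strip().strip('"')] = coords
    return annotations

anns = parse_idl("brainwash_train.idl")  # assumed filename from the release
print(sum(len(b) for b in anns.values()), "head annotations")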

Brainwash is not a widely used dataset but since its publication by Stanford University in 2015, it has notably appeared in several research papers from the National University of Defense Technology in Changsha, China. In 2016 and in 2017 researchers there conducted studies on detecting people's heads in crowded scenes for the purpose of surveillance. 3 4

If you happen to have been at Brainwash cafe in San Francisco at any time on October 27, November 13, or November 24 in 2014, you are most likely included in the Brainwash dataset and have unwittingly contributed to surveillance research.

-

{% include 'dashboard.html' %}

-

{% include 'supplementary_header.html' %}

-
 A visualization of 81,973 head annotations from the Brainwash dataset training partition. © megapixels.cc
A visualization of 81,973 head annotations from the Brainwash dataset training partition. © megapixels.cc
A sample image from the Brainwash dataset used for training face and head detection algorithms for surveillance. The dataset contains about 12,000 images. License: Open Data Commons Public Domain Dedication (PDDL)
A sample image from the Brainwash dataset used for training face and head detection algorithms for surveillance. The dataset contains about 12,000 images. License: Open Data Commons Public Domain Dedication (PDDL)
 49 of the 11,918 images included in the Brainwash dataset. License: Open Data Commons Public Domain Dedication (PDDL)
49 of the 11,918 images included in the Brainwash dataset. License: Open Data Commons Public Domain Dedication (PDDL)

TODO

+
+

Who used Brainwash Dataset?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

+ To help understand how Brainwash Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Brainwash Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+
+ +
+
+
+
+ +

Supplementary Information

+ +
 A visualization of 81,973 head annotations from the Brainwash dataset training partition. © megapixels.cc
A visualization of 81,973 head annotations from the Brainwash dataset training partition. © megapixels.cc
A sample image from the Brainwash dataset used for training face and head detection algorithms for surveillance. The dataset contains about 12,000 images. License: Open Data Commons Public Domain Dedication (PDDL)
A sample image from the Brainwash dataset used for training face and head detection algorithms for surveillance. The dataset contains about 12,000 images. License: Open Data Commons Public Domain Dedication (PDDL)
 49 of the 11,918 images included in the Brainwash dataset. License: Open Data Commons Public Domain Dedication (PDDL)
49 of the 11,918 images included in the Brainwash dataset. License: Open Data Commons Public Domain Dedication (PDDL)

TODO

  • change supp images to 2x2 grid with bboxes
  • add bounding boxes to the header image
  • @@ -62,7 +120,23 @@
  • add ethics link to Stanford
  • add optout info
-

{% include 'cite_our_work.html' %}

+
+ +

Cite Our Work

+

+ + If you use our data, research, or graphics please cite our work: + +

+@online{megapixels,
+  author = {Harvey, Adam. LaPlace, Jules.},
+  title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
+  year = 2019,
+  url = {https://megapixels.cc/},
+  urldate = {2019-04-20}
+}
+ +

References

  • a

    "readme.txt" https://exhibits.stanford.edu/data/catalog/sx925dc9385.

  • a

Stewart, Russell. Andriluka, Mykhaylo. "End-to-end people detection in crowded scenes". 2016.

  • a

    Li, Y. and Dou, Y. and Liu, X. and Li, T. Localized Region Context and Object Feature Fusion for People Head Detection. ICIP16 Proceedings. 2016. Pages 594-598.

    diff --git a/site/public/datasets/caltech_10k/index.html b/site/public/datasets/caltech_10k/index.html index 4cbb7ce6..04d63ee3 100644 --- a/site/public/datasets/caltech_10k/index.html +++ b/site/public/datasets/caltech_10k/index.html @@ -27,7 +27,7 @@

    Caltech 10K Faces Dataset

    -
    +

    [ page under development ]

    +
+

Who used Brainwash Dataset?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

+ To help understand how Brainwash Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Brainwash Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+

(ignore) research notes

The dataset contains images of people collected from the web by typing common given names into Google Image Search. The coordinates of the eyes, the nose and the center of the mouth for each frontal face are provided in a ground truth file. This information can be used to align and crop the human faces or as a ground truth for a face detection algorithm. The dataset has 10,524 human faces of various resolutions and in different settings, e.g. portrait images, groups of people, etc. Profile faces or very low resolution faces are not labeled.
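
Given the eye coordinates from the ground truth file, the alignment step described above reduces to a rotation about the midpoint between the eyes followed by a crop. A minimal Pillow sketch; the filename and example coordinates are illustrative only:

# Sketch: align a face so the eyes are level, using ground-truth eye points.
import math
from PIL import Image

def align_face(img, left_eye, right_eye, out_size=128):
    (lx, ly), (rx, ry) = left_eye, right_eye
    # Angle of the eye line in degrees; rotating by it levels the eyes.
    angle = math.degrees(math.atan2(ry - ly, rx - lx))
    cx, cy = (lx + rx) / 2.0, (ly + ry) / 2.0
    rotated = img.rotate(angle, center=(cx, cy), resample=Image.BILINEAR)
    half = out_size // 2
    return rotated.crop((int(cx) - half, int(cy) - half,
                         int(cx) + half, int(cy) + half))

face = align_face(Image.open("example.jpg"), (230, 310), (290, 305))
face.save("aligned.jpg")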

diff --git a/site/public/datasets/celeba/index.html b/site/public/datasets/celeba/index.html index 9d75b428..c72f3798 100644 --- a/site/public/datasets/celeba/index.html +++ b/site/public/datasets/celeba/index.html @@ -28,7 +28,7 @@
CelebA is a dataset of people...
CelebA includes...

CelebA Dataset

-
+

[ PAGE UNDER DEVELOPMENT ]

+
+

Who used CelebA Dataset?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

+ To help understand how CelebA Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Large-scale CelebFaces Attributes Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+

Research

• "An Unsupervised Approach to Solving Inverse Problems using Generative Adversarial Networks" notes work "sponsored by an agency of the United States government. Neither the United States government nor Lawrence Livermore National Security, LLC, nor any of their"
  • 7dab6fbf42f82f0f5730fc902f72c3fb628ef2f0
  • diff --git a/site/public/datasets/cofw/index.html b/site/public/datasets/cofw/index.html index 084cf7c2..eef8cf5e 100644 --- a/site/public/datasets/cofw/index.html +++ b/site/public/datasets/cofw/index.html @@ -27,7 +27,7 @@

    Caltech Occluded Faces in the Wild

    -
    +

    [ PAGE UNDER DEVELOPMENT ]

    +
+

Who used COFW Dataset?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

+ To help understand how COFW Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Caltech Occluded Faces in the Wild was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+

(ignore) research notes

+
Years
1993-1996
Images
14,126
Identities
1,199
Origin
Web Searches
Funded by
ODNI, IARPA, Microsoft

COFW "is designed to benchmark face landmark algorithms in realistic conditions, which include heavy occlusions and large shape variations" [Robust face landmark estimation under occlusion].

We asked four people with different levels of computer vision knowledge to each collect 250 faces representative of typical real-world images, with the clear goal of challenging computer vision methods. The result is 1,007 images of faces obtained from a variety of sources.

@@ -54,11 +104,58 @@ To increase the number of training images, and since COFW has the exact same la

This research is supported by NSF Grant 0954083 and by the Office of the Director of National Intelligence (ODNI), Intelligence Advanced Research Projects Activity (IARPA), via IARPA R&D Contract No. 2014-14071600012.

https://www.cs.cmu.edu/~peiyunh/topdown/

-

{% include 'map.html' %}

-

{% include 'supplementary_header.html' %}

-

{% include 'citations.html' %}

-

{% include 'chart.html' %}

-

TODO

+
+ +

Biometric Trade Routes

+ +

+ To help understand how COFW Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Caltech Occluded Faces in the Wild was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the location markers to reveal research projects at that location. +

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org and then dataset usage verified and geolocated.
+
+ +
+
+
+
+ +

Supplementary Information

+ +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+
+

Who used COFW Dataset?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+

TODO

- replace graphic

diff --git a/site/public/datasets/duke_mtmc/index.html b/site/public/datasets/duke_mtmc/index.html index 3cd19708..5cb6fb0c 100644 --- a/site/public/datasets/duke_mtmc/index.html +++ b/site/public/datasets/duke_mtmc/index.html @@ -28,7 +28,7 @@
Duke MTMC is a dataset of surveillance camera footage of students on Duke University campus
Duke MTMC contains over 2 million video frames and 2,700 unique identities collected from 8 HD cameras at Duke University campus in March 2014

Duke MTMC

-
+

[ page under development ]

Duke MTMC (Multi-Target, Multi-Camera Tracking) is a dataset of video recorded on Duke University campus for research and development of networked camera surveillance systems. MTMC tracking algorithms are used for citywide dragnet surveillance systems such as those used throughout China by SenseTime 1 and the oppressive monitoring of 2.5 million Uyghurs in Xinjiang by SenseNets 2. In fact researchers from both SenseTime 4 5 and SenseNets 3 used the Duke MTMC dataset for their research.

In this investigation into the Duke MTMC dataset, we found that researchers at Duke University in Durham, North Carolina captured over 2,000 students, faculty members, and passersby into one of the most prolific public surveillance research datasets that's used around the world by commercial and defense surveillance organizations.

Since its publication in 2016, the Duke MTMC dataset has been used in over 100 studies at organizations around the world including SenseTime 4 5, SenseNets 3, IARPA and IBM 9, China's National University of Defense Technology 7 8, the US Department of Homeland Security 10, Tencent, Microsoft, Microsoft Asia, Fraunhofer, Senstar Corp., Alibaba, Naver Labs, Google, and Hewlett-Packard Labs, to name only a few.

The Duke MTMC dataset, recorded in 2014 and published in 2016, was originally funded by the U.S. Army Research Laboratory and the National Science Foundation 6. However, our analysis of the geographic locations of the publicly available research shows over twice as many citations by researchers from China as from the United States (44% China, 20% United States). In 2018 alone, there were 70 research project citations from China.

-
 A collection of 1,600 out of the 2,700 students and passersby captured into the Duke MTMC surveillance research and development dataset on . These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification. Open Data Commons Attribution License.
A collection of 1,600 out of the 2,700 students and passersby captured into the Duke MTMC surveillance research and development dataset on . These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification. Open Data Commons Attribution License.

The 8 cameras deployed on Duke's campus were specifically set up to capture students "during periods between lectures, when pedestrian traffic is heavy" 6. Camera 5 was positioned to capture students entering and exiting the university's main chapel. Each camera's location and approximate field of view are shown below. The heat map visualization shows the locations where pedestrians were most frequently annotated in each video from the Duke MTMC dataset.

-
 Duke MTMC camera locations on Duke University campus. Open Data Commons Attribution License.
Duke MTMC camera locations on Duke University campus. Open Data Commons Attribution License.
 Duke MTMC camera views for 8 cameras deployed on campus © megapixels.cc
Duke MTMC camera views for 8 cameras deployed on campus © megapixels.cc
 Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus © megapixels.cc
Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus © megapixels.cc

{% include 'dashboard.html' %}

-

{% include 'supplementary_header.html' %}

-

Funding

+
 A collection of 1,600 out of the 2,700 students and passersby captured into the Duke MTMC surveillance research and development dataset on . These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification. Open Data Commons Attribution License.
A collection of 1,600 out of the 2,700 students and passersby captured into the Duke MTMC surveillance research and development dataset on . These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification. Open Data Commons Attribution License.

The 8 cameras deployed on Duke's campus were specifically set up to capture students "during periods between lectures, when pedestrian traffic is heavy" 6. Camera 5 was positioned to capture students entering and exiting the university's main chapel. Each camera's location and approximate field of view are shown below. The heat map visualization shows the locations where pedestrians were most frequently annotated in each video from the Duke MTMC dataset.
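
The heat map visualizations referenced here can be reproduced by accumulating every annotated bounding box into a pixel grid. A minimal numpy sketch; the (left, top, width, height) box layout is an assumption for illustration, not the exact schema of the released ground truth:

# Sketch: accumulate pedestrian bounding boxes into a per-camera heat map.
import numpy as np

H, W = 1080, 1920  # the Duke MTMC cameras record 1080p video

def heatmap(boxes, height=H, width=W):
    # boxes: iterable of (left, top, box_w, box_h) pixel boxes (assumed layout)
    acc = np.zeros((height, width), dtype=np.float32)
    for left, top, bw, bh in boxes:
        x0, y0 = max(int(left), 0), max(int(top), 0)
        x1, y1 = min(int(left + bw), width), min(int(top + bh), height)
        acc[y0:y1, x0:x1] += 1.0  # every annotation "votes" for its pixels
    return acc / acc.max() if acc.max() else acc

hm = heatmap([(100, 200, 50, 120), (120, 210, 50, 120)])  # toy example
print(hm.shape, hm.max())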

+
 Duke MTMC camera locations on Duke University campus. Open Data Commons Attribution License.
Duke MTMC camera locations on Duke University campus. Open Data Commons Attribution License.
 Duke MTMC camera views for 8 cameras deployed on campus © megapixels.cc
Duke MTMC camera views for 8 cameras deployed on campus © megapixels.cc
 Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus © megapixels.cc
Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus © megapixels.cc
+

Who used Duke MTMC Dataset?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

+ To help understand how Duke MTMC Dataset has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Duke Multi-Target, Multi-Camera Tracking Project was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+
+ +
+
+
+
+ +

Supplementary Information

+ +

Funding

Original funding for the Duke MTMC dataset was provided by the Army Research Office under Grant No. W911NF-10-1-0387 and by the National Science Foundation under Grants IIS-10-17017 and IIS-14-20894.

Video Timestamps

The video timestamps contain the likely, but not yet confirmed, date and times of capture. Because the video timestamps align with the start and stop time sync data provided by the researchers, the relative timing is at least consistent. The rainy weather on that day also contributes toward the likelihood of March 14, 2014.
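
Mapping a frame number to a wall-clock time is then simple arithmetic. A sketch under stated assumptions; the start time below is hypothetical and the 59.94 fps rate is an assumption consistent with the cameras' 60 fps video:

# Sketch: convert a camera frame number to an estimated wall-clock time.
from datetime import datetime, timedelta

FPS = 59.94  # assumption consistent with the cameras' 60 fps video
ASSUMED_START = datetime(2014, 3, 14, 15, 0, 0)  # hypothetical start time

def frame_time(frame, start=ASSUMED_START, fps=FPS):
    return start + timedelta(seconds=frame / fps)

print(frame_time(120000))  # about 33 minutes into the recording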

-

=== columns 2

[Table: Camera / Date — the per-camera capture dates; the table rows were not preserved in this diff]

=== end columns

-

Opting Out

+

Opting Out

If you attended Duke University and were captured by any of the 8 surveillance cameras positioned on campus in 2014, there is unfortunately no way to be removed. The dataset files have been distributed throughout the world and it would not be possible to contact all the owners for removal. Nor do the authors provide any options for students to opt out, nor did they even inform students they would be used as test subjects for surveillance research and development in a project funded, in part, by the United States Army Research Office.

Notes

  • The Duke MTMC dataset paper mentions 2,700 identities, but their ground truth file only lists annotations for 1,812
-

{% include 'cite_our_work.html' %}

-

If you use any data from the Duke MTMC please follow their license and cite their work as:

+
+ +

Cite Our Work

+

+ + If you use our data, research, or graphics please cite our work: + +

+@online{megapixels,
+  author = {Harvey, Adam. LaPlace, Jules.},
+  title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
+  year = 2019,
+  url = {https://megapixels.cc/},
+  urldate = {2019-04-20}
+}
+ +

+

If you use any data from the Duke MTMC please follow their license and cite their work as:

 @inproceedings{ristani2016MTMC,
  title =        {Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking},
diff --git a/site/public/datasets/feret/index.html b/site/public/datasets/feret/index.html
index 8af139ab..387826b0 100644
--- a/site/public/datasets/feret/index.html
+++ b/site/public/datasets/feret/index.html
@@ -42,9 +42,59 @@
   
Website
-

[ page under development ]

-

{% include 'dashboard.html' %}

-

(ignore) RESEARCH below this line

+

[ page under development ]

+
+

Who used LFW?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

+ To help understand how LFW has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Labeled Faces in the Wild was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+

(ignore) RESEARCH below this line

  • Years: 1993-1996
  • Images: 14,126
  • @@ -63,7 +113,7 @@
    • "A release form is necessary because of the privacy laws in the United States."
    -

    Funding

    +

    Funding

The FERET program is sponsored by the U.S. Department of Defense’s Counterdrug Technology Development Program Office. The U.S. Army Research Laboratory (ARL) is the technical agent for the FERET program. ARL designed, administered, and scored the FERET tests. George Mason University collected, processed, and maintained the FERET database. Inquiries regarding the FERET database or test should be directed to P. Jonathon Phillips.

diff --git a/site/public/datasets/hrt_transgender/index.html b/site/public/datasets/hrt_transgender/index.html index 15cf4807..6b9ae7be 100644 --- a/site/public/datasets/hrt_transgender/index.html +++ b/site/public/datasets/hrt_transgender/index.html @@ -28,7 +28,7 @@
TBD
TBD

HRT Transgender Dataset

-
+

[ page under development ]

+

{% include 'dashboard.html' %}

diff --git a/site/public/datasets/lfpw/index.html b/site/public/datasets/lfpw/index.html index 7f16cd01..45de2599 100644 --- a/site/public/datasets/lfpw/index.html +++ b/site/public/datasets/lfpw/index.html @@ -27,7 +27,7 @@

Labeled Face Parts in The Wild

-
+
+

Who used LFWP?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

+ To help understand how LFWP has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Labeled Face Parts in the Wild was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+

RESEARCH below this line

Release 1 of LFPW consists of 1,432 faces from images downloaded from the web using simple text queries on sites such as google.com, flickr.com, and yahoo.com. Each image was labeled by three MTurk workers, and 29 fiducial points, shown below, are included in the dataset. LFPW was originally described in the following publication:

Due to copyright issues, we cannot distribute image files in any format to anyone. Instead, we have made available a list of image URLs where you can download the images yourself. We realize that this makes it impossible to exactly compare numbers, as image links will slowly disappear over time, but we have no other option. This seems to be the way other large web-based databases are evolving.
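
Because LFPW is distributed as URLs rather than images, rebuilding it means downloading whatever links still resolve. A rough sketch; the CSV filename and single-column layout are assumptions, not the official release format:

# Sketch: download an LFPW-style URL list, tolerating dead links.
import csv
from pathlib import Path

import requests

out = Path("lfpw_images")
out.mkdir(exist_ok=True)

ok = dead = 0
with open("lfpw_urls.csv") as f:  # hypothetical filename
    for i, row in enumerate(csv.reader(f)):
        if not row:
            continue
        url = row[0]  # assumed: image URL in the first column
        try:
            r = requests.get(url, timeout=10)
            r.raise_for_status()
            (out / f"{i:05d}.jpg").write_bytes(r.content)
            ok += 1
        except requests.RequestException:
            dead += 1  # link rot is expected to grow over time
print(f"downloaded {ok} images, {dead} dead links")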

diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html index 54b10611..7997629f 100644 --- a/site/public/datasets/lfw/index.html +++ b/site/public/datasets/lfw/index.html @@ -28,7 +28,7 @@
Labeled Faces in The Wild (LFW) is the first facial recognition dataset created entirely from online photos
It includes 13,233 images of 5,749 people copied from the Internet during 2002-2004 and is the most frequently used dataset in the world for benchmarking face recognition algorithms.

Labeled Faces in the Wild

-
+

[ PAGE UNDER DEVELOPMENT ]

Labeled Faces in The Wild (LFW) is "a database of face photographs designed for studying the problem of unconstrained face recognition" 1. It is used to evaluate and improve the performance of facial recognition algorithms in academic, commercial, and government research. According to BiometricUpdate.com 3, LFW is "the most widely used evaluation set in the field of facial recognition" and "attracts a few dozen teams from around the globe including Google, Facebook, Microsoft Research Asia, Baidu, Tencent, SenseTime, Face++ and Chinese University of Hong Kong."

The LFW dataset includes 13,233 images of 5,749 people that were collected between 2002-2004. LFW is a subset of Names and Faces and is part of the first facial recognition training dataset created entirely from images appearing on the Internet. The people appearing in LFW are...

The Names and Faces dataset was the first face recognition dataset created entirely from online photos. However, Names and Faces and LFW are not the first face recognition datasets created entirely "in the wild". That title belongs to the UCD dataset. Images obtained "in the wild" means using an image without explicit consent or awareness from the subject or photographer.

The Names and Faces dataset was the first face recognition dataset created entirely from online photos. However, Names and Faces and LFW are not the first face recognition datasets created entirely "in the wild". That title belongs to the UCD dataset. Images obtained "in the wild" means using an image without explicit consent or awareness from the subject or photographer.

-
All 5,379 people in the Labeled Faces in The Wild Dataset. Showing one face per person
All 5,379 people in the Labeled Faces in The Wild Dataset. Showing one face per person

The Names and Faces dataset was the first face recognition dataset created entirely from online photos. However, Names and Faces and LFW are not the first face recognition datasets created entirely "in the wild". That title belongs to the UCD dataset. Images obtained "in the wild" means using an image without explicit consent or awareness from the subject or photographer.

+
All 5,379 people in the Labeled Faces in The Wild Dataset. Showing one face per person
All 5,379 people in the Labeled Faces in The Wild Dataset. Showing one face per person

The Names and Faces dataset was the first face recognition dataset created entirely from online photos. However, Names and Faces and LFW are not the first face recognition datasets created entirely "in the wild". That title belongs to the UCD dataset. Images obtained "in the wild" means using an image without explicit consent or awareness from the subject or photographer.

The Names and Faces dataset was the first face recognition dataset created entirely from online photos. However, Names and Faces and LFW are not the first face recognition datasets created entirely "in the wild". That title belongs to the UCD dataset. Images obtained "in the wild" means using an image without explicit consent or awareness from the subject or photographer.

-

{% include 'dashboard.html' %}

-

{% include 'supplementary_header.html' %}

-

Commercial Use

+
+

Who used LFW?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

+ To help understand how LFW has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Labeled Faces in the Wild was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+
+ +
+
+
+
+ +

Supplementary Information

+ +

Commercial Use

Add a paragraph about how usage extends far beyond academia into research centers for the largest companies in the world, and even funnels into CIA-funded research in the US and defense industry usage in China.

-
load_file assets/lfw_commercial_use.csv
-name_display, company_url, example_url, country, description
-
-

Research

+

Research

• "In our experiments, we used 10000 images and associated captions from the Faces in the wild data set [3]."
  • "This work was supported in part by the Center for Intelligent Information Retrieval, the Central Intelligence Agency, the National Security Agency and National Science Foundation under CAREER award IIS-0546666 and grant IIS-0326249."
  • @@ -77,7 +132,7 @@ name_display, company_url, example_url, country, description
• The dataset includes 2 images of George Tenet, the former Director of Central Intelligence (DCI) for the Central Intelligence Agency, whose facial biometrics were eventually used to help train facial recognition software in China and Russia
  • ./15/155205b8e288fd49bf203135871d66de879c8c04/paper.txt shows usage by DSTO Australia, supported parimal@iisc.ac.in
-
Created
2002 – 2004
Images
13,233
Identities
5,749
Origin
Yahoo! News Images
Used by
Facebook, Google, Microsoft, Baidu, Tencent, SenseTime, Face++, CIA, NSA, IARPA
Website
    +
Created
2002 – 2004
Images
13,233
Identities
5,749
Origin
Yahoo! News Images
Used by
Facebook, Google, Microsoft, Baidu, Tencent, SenseTime, Face++, CIA, NSA, IARPA
Website
  • There are about 3 men for every 1 woman in the LFW dataset 1
  • The person with the most images is George W. Bush with 530
• There are about 3 George W. Bushes for every 1 Tony Blair (see the sketch below)
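
These per-identity counts are easy to reproduce from the standard LFW directory layout, which stores one folder per person (e.g. lfw/George_W_Bush/George_W_Bush_0001.jpg). A minimal sketch:

# Sketch: count images per identity in a standard LFW download.
from pathlib import Path

root = Path("lfw")  # one subdirectory per person
counts = {
    p.name: len(list(p.glob("*.jpg")))
    for p in root.iterdir() if p.is_dir()
}
for name, n in sorted(counts.items(), key=lambda kv: kv[1], reverse=True)[:5]:
    print(f"{name:25s} {n}")
# expected leader: George_W_Bush with 530 images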
  • diff --git a/site/public/datasets/market_1501/index.html b/site/public/datasets/market_1501/index.html index a76a8859..7c545335 100644 --- a/site/public/datasets/market_1501/index.html +++ b/site/public/datasets/market_1501/index.html @@ -43,9 +43,59 @@
    Website
    -

[ PAGE UNDER DEVELOPMENT ]

    -

    {% include 'dashboard.html' %}

    -

    (ignore) research Notes

    +

[ PAGE UNDER DEVELOPMENT ]

    +
+

Who used Market 1501?

+ +

+ This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

+ +
+ +
+ +
+
+ +
+
+
+ +
+ +

Biometric Trade Routes

+ +

+ To help understand how Market 1501 has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Market 1501 Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +

+ +
+ +
+
+
+ +
+
    +
  • Academic
  • +
  • Commercial
  • +
  • Military / Government
  • +
+
Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
+
+ + +
+ +

Dataset Citations

+

+ The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

+ +
+

(ignore) research Notes

• "MARS is an extension of the Market-1501 dataset. During collection, we placed six near synchronized cameras in the campus of Tsinghua university. There were Five 1,080×1,920 HD cameras and one 640×480 SD camera. MARS consists of 1,261 different pedestrians whom are captured by at least 2 cameras. Given a query tracklet, MARS aims to retrieve tracklets that contain the same ID." - main paper
  • bbox "0065C1T0002F0016.jpg", "0065" is the ID of the pedestrian. "C1" denotes the first diff --git a/site/public/datasets/msceleb/index.html b/site/public/datasets/msceleb/index.html index 60b08b50..8b070118 100644 --- a/site/public/datasets/msceleb/index.html +++ b/site/public/datasets/msceleb/index.html @@ -28,7 +28,7 @@
    MS Celeb is a dataset of web images used for training and evaluating face recognition algorithms
    The MS Celeb dataset includes over 10,000,000 images and 93,000 identities of semi-public figures collected using the Bing search engine

    Microsoft Celeb Dataset (MS Celeb)

    -
    +

    [ PAGE UNDER DEVELOPMENT ]

    https://www.hrw.org/news/2019/01/15/letter-microsoft-face-surveillance-technology

    https://www.scmp.com/tech/science-research/article/3005733/what-you-need-know-about-sensenets-facial-recognition-firm

    -

    {% include 'dashboard.html' %}

    -

    {% include 'supplementary_header.html' %}

    -

    Additional Information

    +
    +

    Who used Microsoft Celeb?

    + +

    + This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

    + +
    + +
    + +
    +
    + +
    +
    +
    + +
    + +

    Biometric Trade Routes

    + +

    + To help understand how Microsoft Celeb has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Microsoft Celebrity Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +

    + +
    + +
    +
    +
    + +
    +
      +
    • Academic
    • +
    • Commercial
    • +
    • Military / Government
    • +
    +
    Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
    +
    + + +
    + +

    Dataset Citations

    +

    + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

    + +
    +
    + +
    +
    +
    +
    + +

    Supplementary Information

    + +

    Additional Information

    diff --git a/site/public/datasets/oxford_town_centre/index.html b/site/public/datasets/oxford_town_centre/index.html index d6f7378f..b48efe3e 100644 --- a/site/public/datasets/oxford_town_centre/index.html +++ b/site/public/datasets/oxford_town_centre/index.html @@ -28,7 +28,7 @@
    Oxford Town Centre is a dataset of surveillance camera footage from Cornmarket St Oxford, England
    The Oxford Town Centre dataset includes approximately 2,200 identities and is used for research and development of face recognition systems

    Oxford Town Centre

    -
    +

    The Oxford Town Centre dataset is a CCTV video of pedestrians in a busy downtown area in Oxford used for research and development of activity and face recognition systems. 1 The CCTV video was obtained from a public surveillance camera at the corner of Cornmarket and Market St. in Oxford, England and includes approximately 2,200 people. Since its publication in 2009 2 the Oxford Town Centre dataset has been used in over 80 verified research projects including commercial research by Amazon, Disney, OSRAM, and Huawei; and academic research in China, Israel, Russia, Singapore, the US, and Germany among dozens more.

The Oxford Town Centre dataset is unique in that it uses footage from a public surveillance camera that would otherwise be designated for public safety. The video shows the pedestrians acting normally and unrehearsed, indicating they neither knew of nor consented to participation in the research project.
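
The ground truth accompanying the video is a plain-text track file. A parsing sketch, assuming the comma-separated column order commonly described for TownCentre-groundtruth.top (person number, frame number, head/body validity flags, head box, body box); treat the column indices as assumptions:

# Sketch: parse a TownCentre-groundtruth.top track file.
# Assumed columns: person, frame, head_valid, body_valid,
# head box (4 values), body box (4 values).
import csv
from collections import defaultdict

tracks = defaultdict(list)  # person id -> list of (frame, body box)
with open("TownCentre-groundtruth.top") as f:
    for row in csv.reader(f):
        if len(row) < 12:
            continue
        person, frame = int(float(row[0])), int(float(row[1]))
        body = tuple(float(v) for v in row[8:12])  # left, top, right, bottom
        tracks[person].append((frame, body))
print(len(tracks), "annotated pedestrians")  # a subset of the ~2,200 visible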

    -

    {% include 'dashboard.html' %}

    -

    {% include 'supplementary_header.html' %}

    -

    Location

    +
    +

    Who used TownCentre?

    + +

    + This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries. +

    + +
    + +
    + +
    +
    + +
    +
    +
    + +
    + +

    Biometric Trade Routes

    + +

    + To help understand how TownCentre has been used around the world by commercial, military, and academic organizations; existing publicly available research citing Oxford Town Centre was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location. +

    + +
    + +
    +
    +
    + +
    +
      +
    • Academic
    • +
    • Commercial
    • +
    • Military / Government
    • +
    +
    Citation data is collected using SemanticScholar.org then dataset usage verified and geolocated.
    +
    + + +
    + +

    Dataset Citations

    +

    + The dataset citations used in the visualizations were collected from Semantic Scholar, a website which aggregates and indexes research papers. Each citation was geocoded using names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms. +

    + +
    +
    + +
    +
    +
    +
    + +

    Supplementary Information

    + +

    Location

The street location of the camera used for the Oxford Town Centre dataset was confirmed by matching the road, benches, and store signs (source). At that location, two public CCTV cameras exist mounted on the side of the Northgate House building at 13-20 Cornmarket St. Because of the lower camera's mounting pole directionality, a view from a private camera in the building across the street can be ruled out because it would have to show more of the silhouette of the lower camera's mounting pole. Two options remain: either the public CCTV camera mounted to the side of the building was used or the researchers mounted their own camera to the side of the building in the same location. Because the researchers used many other existing public CCTV cameras for their research projects, it is likely that they would also have access to this camera.

Although Google Street View images only show this public CCTV camera pointing the other way, at least one public photo shows the upper CCTV camera pointing in the same direction as in the Oxford Town Centre dataset, proving the camera can be and has been rotated before.

As for the capture date, the text on the storefront display shows a sale happening from December 2nd – 7th, indicating the capture date was between or just before those dates. The capture year is either 2008 or 2007, since prior to 2007 the Carphone Warehouse (photo, history) did not exist at this location. Since the sweaters in the GAP window display are more similar to those in a GAP website snapshot from November 2007, our guess is that the footage was obtained during late November or early December 2007. The lack of street vendors and slight waste residue near the bench suggests that it was probably a weekday after rubbish removal.

Footage from this public CCTV camera was used to create the Oxford Town Centre dataset. Image source: Google Street View (map: https://www.google.com/maps/@51.7528162,-1.2581152,3a,50.3y,310.59h,87.23t/data=!3m7!1e1!3m5!1s3FsGN-PqYC-VhQGjWgmBdQ!2e0!5s20120601T000000!7i13312!8i6656)

Heat map body visualization of the pedestrians detected in the Oxford Town Centre dataset © megapixels.cc

Heat map face visualization of the pedestrians detected in the Oxford Town Centre dataset © megapixels.cc

    Cite Our Work


If you use our data, research, or graphics, please cite our work:

@online{megapixels,
  author = {Harvey, Adam and LaPlace, Jules},
  title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
  year = 2019,
  url = {https://megapixels.cc/},
  urldate = {2019-04-20}
}

    References

• Benfold, Ben and Reid, Ian. "Stable Multi-Target Tracking in Real-Time Surveillance Video". CVPR 2011. Pages 3457-3464.

• "Guiding Visual Surveillance by Tracking Human Attention". 2009.

    diff --git a/site/public/datasets/pipa/index.html b/site/public/datasets/pipa/index.html index 28da8d4b..6c920b46 100644 --- a/site/public/datasets/pipa/index.html +++ b/site/public/datasets/pipa/index.html @@ -28,7 +28,7 @@
    People in Photo Albums (PIPA) is a dataset...
[ add subdescription ]

    People in Photo Albums


    [ PAGE UNDER DEVELOPMENT ]


    Who used PIPA Dataset?


This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.
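For illustration only, the aggregation behind such a chart might look like the sketch below. It assumes a verified-citations CSV like those under site/datasets/verified/ with 'country' and 'year' columns; both column names are placeholders rather than the site's actual schema.

```python
# Hedged sketch of the per-country, per-year tally behind the bar chart.
# The 'country' and 'year' column names are assumptions.
import pandas as pd

citations = pd.read_csv("site/datasets/verified/pipa.csv")
top10 = citations["country"].value_counts().head(10).index
per_year = (citations[citations["country"].isin(top10)]
            .groupby(["country", "year"])
            .size()
            .unstack(fill_value=0))  # rows: country, columns: year
print(per_year)
```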


    Biometric Trade Routes


To help understand how the PIPA dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the People in Photo Albums dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.

• Academic
• Commercial
• Military / Government

Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.

    Dataset Citations


The dataset citations used in the visualizations were collected from Semantic Scholar, a website that aggregates and indexes research papers. Each citation was geocoded using the names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.

    diff --git a/site/public/datasets/pubfig/index.html b/site/public/datasets/pubfig/index.html index 1a6ffebf..e81e12bc 100644 --- a/site/public/datasets/pubfig/index.html +++ b/site/public/datasets/pubfig/index.html @@ -28,7 +28,7 @@
    PubFig is a dataset...
[ add subdescription ]

    PubFig


    [ PAGE UNDER DEVELOPMENT ]


    Who used PubFig?


This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.


    Biometric Trade Routes


To help understand how PubFig has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Public Figures Face Dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.

• Academic
• Commercial
• Military / Government

Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.

    Dataset Citations


The dataset citations used in the visualizations were collected from Semantic Scholar, a website that aggregates and indexes research papers. Each citation was geocoded using the names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.

    diff --git a/site/public/datasets/uccs/index.html b/site/public/datasets/uccs/index.html index 4c106922..32f7cdb2 100644 --- a/site/public/datasets/uccs/index.html +++ b/site/public/datasets/uccs/index.html @@ -28,7 +28,7 @@
UnConstrained College Students is a dataset of long-range surveillance photos of students on the University of Colorado Colorado Springs campus
    The UnConstrained College Students dataset includes 16,149 images of 1,732 students, faculty, and pedestrians and is used for developing face recognition and face detection algorithms

    UnConstrained College Students


UnConstrained College Students (UCCS) is a dataset of long-range surveillance photos captured at the University of Colorado Colorado Springs, developed primarily for "face detection and recognition research towards surveillance applications" 1. According to the authors of two papers associated with the dataset, over 1,700 students and pedestrians were "photographed using a long-range high-resolution surveillance camera without their knowledge". 3 In this investigation, we examine the contents of the dataset, funding sources, photo EXIF data, and information from publicly available research project citations.

The UCCS dataset includes over 1,700 unique identities, most of which are students walking to and from class. As of 2018, it was the "largest surveillance [face recognition] benchmark in the public domain." 4 The photos were taken during the spring semesters of 2012 – 2013 on the West Lawn of the University of Colorado Colorado Springs campus. The photographs were timed to capture students during breaks between their scheduled classes in the morning and afternoon, Monday through Thursday. "For example, a student taking Monday-Wednesday classes at 12:30 PM will show up in the camera on almost every Monday and Wednesday." 2

Example images from the UnConstrained College Students Dataset.

The long-range surveillance images in the UnConstrained College Students dataset were captured using a Canon 7D 18-megapixel digital camera fitted with a Sigma 800mm F5.6 EX APO DG HSM telephoto lens pointed out an office window overlooking the university's West Lawn. The students were photographed from a distance of approximately 150 meters. "The camera [was] programmed to start capturing images at specific time intervals between classes to maximize the number of faces being captured." 2 Their setup made it impossible for students to know they were being photographed, providing the researchers with realistic surveillance images to help build face detection and recognition systems for real-world applications in defense, intelligence, and commerce.
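As a back-of-envelope check (our own, not from the papers), the Canon 7D's published sensor specifications suggest why 150 meters is well within face recognition range:

```python
# Rough estimate of facial detail resolved by an 800mm lens on a Canon 7D
# at 150 m. Assumes the 7D's published APS-C specs (22.3 mm sensor width,
# 5184 px across); the 16 cm face width is a rough average.
SENSOR_WIDTH_MM = 22.3
SENSOR_WIDTH_PX = 5184
FOCAL_MM = 800.0
DISTANCE_M = 150.0
FACE_WIDTH_M = 0.16

focal_px = FOCAL_MM * SENSOR_WIDTH_PX / SENSOR_WIDTH_MM  # focal length in pixels
face_px = FACE_WIDTH_M * focal_px / DISTANCE_M           # pinhole projection
print(f"A face spans roughly {face_px:.0f} px across")   # ~198 px, ample for recognition
```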

The location at University of Colorado Colorado Springs where students were surreptitiously photographed with a long-range surveillance camera for use in a defense and intelligence agency funded research project on face recognition. Image: Google Maps

In the two papers associated with the release of the UCCS dataset (Unconstrained Face Detection and Open-Set Face Recognition Challenge and Large Scale Unconstrained Open Set Face Database), the researchers disclosed their funding sources as ODNI (United States Office of the Director of National Intelligence), IARPA (Intelligence Advanced Research Projects Activity), ONR MURI (Office of Naval Research and The Department of Defense Multidisciplinary University Research Initiative), Army SBIR (Small Business Innovation Research), SOCOM SBIR (Special Operations Command and Small Business Innovation Research), and the National Science Foundation. Further, UCCS's VAST site explicitly states that it is part of IARPA Janus, a face recognition project developed to serve the needs of national intelligence interests.

The EXIF data embedded in the images shows that the photo capture times follow a similar weekly pattern, but also highlights that the vast majority of photos (over 7,000) were taken on Tuesdays around noon during students' lunch break. The lack of any photos taken on Friday shows that the researchers were only interested in capturing images of students.
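A minimal sketch of that weekday tally, assuming a local folder of the dataset's JPEGs; which EXIF timestamp tag the UCCS images actually carry is an assumption:

```python
# Hedged sketch: count photos per weekday from EXIF capture timestamps.
# Tag 306 (DateTime, "YYYY:MM:DD HH:MM:SS") sits in the primary IFD that
# Pillow's getexif() returns; adjust if the images use DateTimeOriginal.
from collections import Counter
from datetime import datetime
from pathlib import Path
from PIL import Image

def photos_per_weekday(image_dir):
    counts = Counter()
    for path in Path(image_dir).glob("*.jpg"):
        stamp = Image.open(path).getexif().get(306)
        if stamp:
            taken = datetime.strptime(stamp, "%Y:%m:%d %H:%M:%S")
            counts[taken.strftime("%A")] += 1  # e.g. "Tuesday"
    return counts
```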


UCCS photos captured per weekday © megapixels.cc

The two research papers associated with the release of the UCCS dataset (Unconstrained Face Detection and Open-Set Face Recognition Challenge and Large Scale Unconstrained Open Set Face Database) acknowledge that the primary funding sources for their work were United States defense and intelligence agencies. Specifically, development of the UnConstrained College Students dataset was funded by the Intelligence Advanced Research Projects Activity (IARPA), Office of the Director of National Intelligence (ODNI), Office of Naval Research and The Department of Defense Multidisciplinary University Research Initiative (ONR MURI), Small Business Innovation Research (SBIR), Special Operations Command and Small Business Innovation Research (SOCOM SBIR), and the National Science Foundation. Further, UCCS's VAST site explicitly states that it is part of IARPA Janus, a face recognition project developed to serve the needs of national intelligence interests, clearly establishing that the funding sources and immediate beneficiaries of this dataset are United States defense and intelligence agencies.

Although the images were first captured in 2012 – 2013, the dataset was not publicly released until 2016. Then in 2017 the UCCS face dataset formed the basis for a defense and intelligence agency funded face recognition challenge project at the International Joint Conference on Biometrics in Denver, CO. And in 2018 the dataset was again used for the 2nd Unconstrained Face Detection and Open Set Recognition Challenge at the European Conference on Computer Vision (ECCV) in Munich, Germany.

As of April 15, 2019, the UCCS dataset is no longer available for public download. But during the three years it was publicly available (2016-2019), the UCCS dataset appeared in at least 6 publicly available research papers, including verified usage from Beihang University, which is known to provide research and development for China's military.



    Who used UCCS?


This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.


    Biometric Trade Routes


To help understand how UCCS has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the UnConstrained College Students dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.

• Academic
• Commercial
• Military / Government

Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.

    Dataset Citations


The dataset citations used in the visualizations were collected from Semantic Scholar, a website that aggregates and indexes research papers. Each citation was geocoded using the names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.


    Supplementary Information


To show the types of face images used in the UCCS student dataset while protecting their individual privacy, a generative adversarial network was used to interpolate between identities in the dataset. The image below shows output from a GAN trained on the UCCS face bounding-box regions: over 90,000 face areas from 16,000 images.
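A minimal sketch of the identity interpolation, assuming a pretrained generator G that maps a latent vector to a face image; the generator and its 512-dimensional latent size are placeholders:

```python
# Hedged sketch of latent-space interpolation between two generated
# identities. `G` is a placeholder for a pretrained GAN generator that
# maps a (1, latent_dim) tensor to a face image.
import torch

def interpolate_identities(G, z_a, z_b, steps=8):
    """Blend two latent codes and render each intermediate face."""
    frames = []
    for t in torch.linspace(0.0, 1.0, steps):
        z = (1 - t) * z_a + t * z_b       # linear blend in latent space
        frames.append(G(z.unsqueeze(0)))  # one synthetic face per step
    return torch.cat(frames)

# Usage sketch: z_a, z_b = torch.randn(512), torch.randn(512)
# grid = interpolate_identities(G, z_a, z_b)
```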

GAN generated approximations of students in the UCCS dataset. © megapixels.cc 2018

=== columns 2

    UCCS photos taken in 2012

    @@ -120,8 +177,7 @@ Their setup made it impossible for students to know they were being photographed

    ===========


    UCCS photos taken in 2013

    @@ -155,10 +211,9 @@ Their setup made it impossible for students to know they were being photographed

    === end columns


    Location

The location of the camera and subjects can be confirmed using several visual cues in the dataset images: the unique pattern of the sidewalk that is only used on the UCCS Pedestrian Spine near the West Lawn, the two UCCS sign poles with matching graphics still visible in Google Street View, the no parking sign and the directionality of its arrow, the back of the street sign next to it, the slight bend in the sidewalk, the presence of cars passing in the background of the image, and the far wall of the parking garage all match images in the dataset. The original papers also provide another clue: a picture of the camera inside the office that was used to create the dataset. The window view in this image provides another match for the brick pattern on the north facade of the Kraemer Family Library and the green metal fence along the sidewalk. View the location on Google Maps

3D view showing the angle of view of the surveillance camera used for the UCCS dataset. Image: Google Maps

    Funding

    The UnConstrained College Students dataset is associated with two main research papers: "Large Scale Unconstrained Open Set Face Database" and "Unconstrained Face Detection and Open-Set Face Recognition Challenge". Collectively, these papers and the creation of the dataset have received funding from the following organizations:

    • ONR (Office of Naval Research) MURI (The Department of Defense Multidisciplinary University Research Initiative) grant N00014-08-1-0638
@@ -179,7 +234,23 @@ Their setup made it impossible for students to know they were being photographed


    Cite Our Work


If you use our data, research, or graphics, please cite our work:

@online{megapixels,
  author = {Harvey, Adam and LaPlace, Jules},
  title = {MegaPixels: Origins, Ethics, and Privacy Implications of Publicly Available Face Recognition Image Datasets},
  year = 2019,
  url = {https://megapixels.cc/},
  urldate = {2019-04-20}
}

    References

• "2nd Unconstrained Face Detection and Open Set Recognition Challenge." https://vast.uccs.edu/Opensetface/. Accessed April 15, 2019.

• Sapkota, Archana and Boult, Terrance. "Large Scale Unconstrained Open Set Face Database." 2013.

• Günther, M. et al. "Unconstrained Face Detection and Open-Set Face Recognition Challenge." 2018. arXiv:1708.02337v3.

      diff --git a/site/public/datasets/vgg_face2/index.html b/site/public/datasets/vgg_face2/index.html index e23a3afd..a9d318f1 100644 --- a/site/public/datasets/vgg_face2/index.html +++ b/site/public/datasets/vgg_face2/index.html @@ -48,9 +48,59 @@
      Website

      [ page under development ]


    Who used Brainwash Dataset?


This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.


    Biometric Trade Routes


To help understand how the Brainwash dataset has been used around the world by commercial, military, and academic organizations, existing publicly available research citing the Brainwash dataset was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.

• Academic
• Commercial
• Military / Government

Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.

    Dataset Citations


The dataset citations used in the visualizations were collected from Semantic Scholar, a website that aggregates and indexes research papers. Each citation was geocoded using the names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.


    (ignore) research notes

• The VGG Face 2 dataset includes approximately 1,331 actresses, 139 presidents, 16 wives, 3 husbands, 2 snooker players, and 1 guru
• The original VGGF2 name list has been updated with the results returned from Google Knowledge Graph
@@ -58,7 +108,7 @@
• The 97 names with a score of 0.75 or lower were manually reviewed; this included name changes validated using Wikipedia.org results for names such as "Bruce Jenner" to "Caitlyn Jenner", spousal last-name changes, and discretionary changes to improve search results, such as combining nicknames with full names when appropriate, for example changing "Aleksandar Petrović" to "Aleksandar 'Aco' Petrović", and minor changes such as "Mohammad Ali" to "Muhammad Ali"
    • The 'Description' text was automatically added when the Knowledge Graph score was greater than 250

    TODO

• create name list, and populate with Knowledge Graph information like LFW
    • make list of interesting number stats, by the numbers
diff --git a/site/public/datasets/viper/index.html index 6f646bb8..bc4ddd3d 100644 --- a/site/public/datasets/viper/index.html +++ b/site/public/datasets/viper/index.html @@ -28,7 +28,7 @@
      VIPeR is a person re-identification dataset of images captured at UC Santa Cruz in 2007
VIPeR contains 1,264 images of 632 persons photographed on the UC Santa Cruz campus and is used to train person re-identification algorithms for surveillance

      VIPeR Dataset


      [ page under development ]

VIPeR (Viewpoint Invariant Pedestrian Recognition) is a dataset of pedestrian images captured at University of California Santa Cruz in 2007. According to the researchers 2, "cameras were placed in different locations in an academic setting and subjects were notified of the presence of cameras, but were not coached or instructed in any way."

VIPeR is amongst the most widely used publicly available person re-identification datasets. In 2017 the VIPeR dataset was combined into a larger person re-identification dataset created by the Chinese University of Hong Kong called PETA (PEdesTrian Attribute).


      Who used VIPeR?


This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.


      Biometric Trade Routes


To help understand how VIPeR has been used around the world by commercial, military, and academic organizations, existing publicly available research citing Viewpoint Invariant Pedestrian Recognition was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.

• Academic
• Commercial
• Military / Government

Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.

      Dataset Citations


The dataset citations used in the visualizations were collected from Semantic Scholar, a website that aggregates and indexes research papers. Each citation was geocoded using the names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.

      diff --git a/site/public/datasets/youtube_celebrities/index.html b/site/public/datasets/youtube_celebrities/index.html index c491e6af..69b3a02e 100644 --- a/site/public/datasets/youtube_celebrities/index.html +++ b/site/public/datasets/youtube_celebrities/index.html @@ -27,9 +27,59 @@

      YouTube Celebrities


      [ page under development ]


      Who used YouTube Celebrities?


This bar chart presents a ranking of the top countries where dataset citations originated. Mouse over individual columns to see yearly totals. These charts show at most the top 10 countries.


      Biometric Trade Routes


To help understand how YouTube Celebrities has been used around the world by commercial, military, and academic organizations, existing publicly available research citing YouTube Celebrities was collected, verified, and geocoded to show the biometric trade routes of people appearing in the images. Click on the markers to reveal research projects at that location.

• Academic
• Commercial
• Military / Government

Citation data is collected using SemanticScholar.org, then dataset usage is verified and geolocated.

      Dataset Citations


The dataset citations used in the visualizations were collected from Semantic Scholar, a website that aggregates and indexes research papers. Each citation was geocoded using the names of institutions found in the PDF front matter, or as listed on other resources. These papers have been manually verified to show that researchers downloaded and used the dataset to train or test machine learning algorithms.


      Notes...

      • Selected dataset sequences: (a) MBGC, (b) CMU MoBo, (c) First Honda/UCSD, and (d) YouTube Celebrities.
      • -- cgit v1.2.3-70-g09d2 From 43c3e3904f80eb56769fba4634729d0e567f9a32 Mon Sep 17 00:00:00 2001 From: adamhrv Date: Wed, 17 Apr 2019 16:51:44 +0200 Subject: update duke --- site/content/pages/datasets/duke_mtmc/index.md | 71 ++++--- site/public/about/assets/LICENSE/index.html | 22 +- site/public/about/attribution/index.html | 22 +- site/public/about/index.html | 30 +-- site/public/about/legal/index.html | 22 +- site/public/about/press/index.html | 22 +- .../datasets/50_people_one_question/index.html | 22 +- site/public/datasets/afad/index.html | 22 +- site/public/datasets/brainwash/index.html | 23 +- site/public/datasets/caltech_10k/index.html | 22 +- site/public/datasets/celeba/index.html | 22 +- site/public/datasets/cofw/index.html | 22 +- site/public/datasets/duke_mtmc/index.html | 231 +++++++++++++++++---- site/public/datasets/feret/index.html | 22 +- site/public/datasets/hrt_transgender/index.html | 22 +- site/public/datasets/index.html | 24 +-- site/public/datasets/lfpw/index.html | 22 +- site/public/datasets/lfw/index.html | 22 +- site/public/datasets/market_1501/index.html | 22 +- site/public/datasets/msceleb/index.html | 22 +- site/public/datasets/oxford_town_centre/index.html | 22 +- site/public/datasets/pipa/index.html | 22 +- site/public/datasets/pubfig/index.html | 22 +- site/public/datasets/uccs/index.html | 33 ++- site/public/datasets/vgg_face2/index.html | 22 +- site/public/datasets/viper/index.html | 22 +- .../public/datasets/youtube_celebrities/index.html | 22 +- site/public/info/index.html | 22 +- site/public/research/00_introduction/index.html | 22 +- .../research/01_from_1_to_100_pixels/index.html | 22 +- .../research/02_what_computers_can_see/index.html | 22 +- site/public/research/index.html | 22 +- site/public/test/chart/index.html | 22 +- site/public/test/citations/index.html | 22 +- site/public/test/csv/index.html | 22 +- site/public/test/datasets/index.html | 22 +- site/public/test/face_search/index.html | 22 +- site/public/test/gallery/index.html | 22 +- site/public/test/index.html | 22 +- site/public/test/map/index.html | 22 +- site/public/test/name_search/index.html | 22 +- site/public/test/pie_chart/index.html | 22 +- 42 files changed, 686 insertions(+), 518 deletions(-) (limited to 'site/public/datasets/feret/index.html') diff --git a/site/content/pages/datasets/duke_mtmc/index.md b/site/content/pages/datasets/duke_mtmc/index.md index ac0a3f2e..2a8bfe05 100644 --- a/site/content/pages/datasets/duke_mtmc/index.md +++ b/site/content/pages/datasets/duke_mtmc/index.md @@ -18,35 +18,63 @@ authors: Adam Harvey ### sidebar ### end sidebar -[ page under development ] +Duke MTMC (Multi-Target, Multi-Camera) is a dataset of surveillance video footage taken on Duke University's campus in 2014 and is used for research and development of video tracking systems, person re-identification, and low-resolution facial recognition. The dataset contains over 14 hours of synchronized surveillance video from 8 cameras at 1080p and 60FPS with over 2 million frames of 2,000 students walking to and from classes. The 8 surveillance cameras deployed on campus were specifically setup to capture students "during periods between lectures, when pedestrian traffic is heavy"[^duke_mtmc_orig]. -Duke MTMC (Multi-Target, Multi-Camera Tracking) is a dataset of video recorded on Duke University campus for research and development of networked camera surveillance systems. 
MTMC tracking algorithms are used for citywide dragnet surveillance systems such as those used throughout China by SenseTime[^sensetime_qz] and the oppressive monitoring of 2.5 million Uyghurs in Xinjiang by SenseNets[^sensenets_uyghurs]. In fact researchers from both SenseTime[^sensetime1] [^sensetime2] and SenseNets[^sensenets_sensetime] used the Duke MTMC dataset for their research. +In this investigation into the Duke MTMC dataset we tracked down over 100 publicly available research papers that explicitly acknowledged using Duke MTMC. Our analysis shows that the dataset has spread far beyond its origins and intentions in academic research projects at Duke University. Since its publication in 2016, more than twice as many research citations originated in China as in the United States. Among these citations were papers with explicit and direct links to the Chinese military and several of the companies known to provide Chinese authorities with the oppressive surveillance technology used to monitor millions of Uighur Muslims. -In this investigation into the Duke MTMC dataset, we found that researchers at Duke University in Durham, North Carolina captured over 2,000 students, faculty members, and passersby into one of the most prolific public surveillance research datasets that's used around the world by commercial and defense surveillance organizations. +In one 2018 [paper](http://openaccess.thecvf.com/content_cvpr_2018/papers/Xu_Attention-Aware_Compositional_Network_CVPR_2018_paper.pdf) jointly published by researchers from SenseNets and SenseTime (and funded by SenseTime Group Limited) entitled [Attention-Aware Compositional Network for Person Re-identification](https://www.semanticscholar.org/paper/Attention-Aware-Compositional-Network-for-Person-Xu-Zhao/14ce502bc19b225466126b256511f9c05cadcb6e), the Duke MTMC dataset was used for "extensive experiments" on improving person re-identification across multiple surveillance cameras with important applications in "finding missing elderly and children, and suspect tracking, etc." Both SenseNets and SenseTime have been directly linked to the providing surveillance technology to monitor Uighur Muslims in China. [^sensetime_qz][^sensenets_uyghurs][^xinjiang_nyt] -Since it's publication in 2016, the Duke MTMC dataset has been used in over 100 studies at organizations around the world including SenseTime[^sensetime1] [^sensetime2], SenseNets[^sensenets_sensetime], IARPA and IBM[^iarpa_ibm], Chinese National University of Defense [^cn_defense1][^cn_defense2], US Department of Homeland Security[^us_dhs], Tencent, Microsoft, Microsft Asia, Fraunhofer, Senstar Corp., Alibaba, Naver Labs, Google and Hewlett-Packard Labs to name only a few. +![caption: A collection of 1,600 out of the approximately 2,000 students and pedestrians in the Duke MTMC dataset. These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification, and eventually the QMUL SurvFace face recognition dataset. Open Data Commons Attribution License.](assets/duke_mtmc_reid_montage.jpg) -The creation and publication of the Duke MTMC dataset in 2014 (published in 2016) was originally funded by the U.S. Army Research Laboratory and the National Science Foundation[^duke_mtmc_orig]. Though our analysis of the geographic locations of the publicly available research shows over twice as many citations by researchers from China (44% China, 20% United States). In 2018 alone, there were 70 research project citations from China. 
+Despite [repeated](https://www.hrw.org/news/2017/11/19/china-police-big-data-systems-violate-privacy-target-dissent) [warnings](https://www.hrw.org/news/2018/02/26/china-big-data-fuels-crackdown-minority-region) by Human Rights Watch that the authoritarian surveillance used in China represents a violation of human rights, researchers at Duke University continued to provide open access to their dataset for anyone to use for any project. As the surveillance crisis in China grew, so did the number of citations with links to organizations complicit in the crisis. In 2018 alone there were over 70 research projects happening in China that publicly acknowledged benefiting from the Duke MTMC dataset. Amongst these were projects from SenseNets, SenseTime, CloudWalk, Megvii, Beihang University, and the PLA's National University of Defense Technology. -![caption: A collection of 1,600 out of the 2,700 students and passersby captured into the Duke MTMC surveillance research and development dataset on . These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification. Open Data Commons Attribution License.](assets/duke_mtmc_reid_montage.jpg) +| Organization | Paper | Link | Year | Used Duke MTMC | +|---|---|---|---| +| SenseNets, SenseTime | Attention-Aware Compositional Network for Person Re-identification | [SemanticScholar](https://www.semanticscholar.org/paper/Attention-Aware-Compositional-Network-for-Person-Xu-Zhao/14ce502bc19b225466126b256511f9c05cadcb6e) | 2018 | ✔ | +|SenseTime| End-to-End Deep Kronecker-Product Matching for Person Re-identification | [thcvf.com](http://openaccess.thecvf.com/content_cvpr_2018/papers/Shen_End-to-End_Deep_Kronecker-Product_CVPR_2018_paper.pdf) | 2018| ✔ | +|CloudWalk| Horizontal Pyramid Matching for Person Re-identification | [arxiv.org](https://arxiv.org/pdf/1804.05275.pdf) | 20xx | ✔ | +| Megvii | Multi-Target, Multi-Camera Tracking by Hierarchical Clustering: Recent Progress on DukeMTMC Project | [SemanticScholar](https://www.semanticscholar.org/paper/Multi-Target%2C-Multi-Camera-Tracking-by-Hierarchical-Zhang-Wu/10c20cf47d61063032dce4af73a4b8e350bf1128) | 2018 | ✔ | +| Megvii | Person Re-Identification (slides) | [github.io](https://zsc.github.io/megvii-pku-dl-course/slides/Lecture%2011,%20Human%20Understanding_%20ReID%20and%20Pose%20and%20Attributes%20and%20Activity%20.pdf) | 2017 | ✔ | +| Megvii | SCPNet: Spatial-Channel Parallelism Network for Joint Holistic and Partial PersonRe-Identification | [arxiv.org](https://arxiv.org/abs/1810.06996) | 2018 | ✔ | +| CloudWalk | CloudWalk re-identification technology extends facial biometric tracking with improved accuracy | [BiometricUpdate.com](https://www.biometricupdate.com/201903/cloudwalk-re-identification-technology-extends-facial-biometric-tracking-with-improved-accuracy) | 2018 | ✔ | +| CloudWalk | Horizontal Pyramid Matching for Person Re-identification | [arxiv.org](https://arxiv.org/abs/1804.05275)] | 2018 | ✔ | +| National University of Defense Technology | Tracking by Animation: Unsupervised Learning of Multi-Object Attentive Trackers | [SemanticScholar.org](https://www.semanticscholar.org/paper/Tracking-by-Animation%3A-Unsupervised-Learning-of-He-Liu/e90816e1a0e14ea1e7039e0b2782260999aef786) | 2018 | ✔ | +| National University of Defense Technology | Unsupervised Multi-Object Detection for Video Surveillance Using Memory-Based Recurrent Attention Networks | 
[SemanticScholar.org](https://www.semanticscholar.org/paper/Unsupervised-Multi-Object-Detection-for-Video-Using-He-He/59f357015054bab43fb8cbfd3f3dbf17b1d1f881) | 2018 | ✔ | +| Beihang University | Orientation-Guided Similarity Learning for Person Re-identification | [ieee.org](https://ieeexplore.ieee.org/document/8545620) | 2018 | ✔ | +| Beihang University | Online Inter-Camera Trajectory Association Exploiting Person Re-Identification and Camera Topology | [acm.org](https://dl.acm.org/citation.cfm?id=3240663) | 2018 | ✔ | -The 8 cameras deployed on Duke's campus were specifically setup to capture students "during periods between lectures, when pedestrian traffic is heavy".[^duke_mtmc_orig]. Camera 5 was positioned to capture students as entering and exiting the university's main chapel. Each camera's location and approximate field of view. The heat map visualization shows the locations where pedestrians were most frequently annotated in each video from the Duke MTMC dataset. +The reasons that companies in China use the Duke MTMC dataset for research are technically no different than the reasons it is used in the United States and Europe. In fact the original creators of the dataset published a follow up report in 2017 titled [Tracking Social Groups Within and Across Cameras](https://www.semanticscholar.org/paper/Tracking-Social-Groups-Within-and-Across-Cameras-Solera-Calderara/9e644b1e33dd9367be167eb9d832174004840400) with specific applications to "automated analysis of crowds and social gatherings for surveillance and security applications". Their work, as well as the creation of the original dataset in 2014 were both supported in part by the United States Army Research Laboratory. -![caption: Duke MTMC camera locations on Duke University campus. Open Data Commons Attribution License.](assets/duke_mtmc_camera_map.jpg) +Citations from the United States and Europe show a similar trend to that in China, including publicly acknowledged and verified usage of the Duke MTMC dataset supported or carried out by the United States Department of Homeland Security, IARPA, IBM, Microsoft (who provides surveillance to ICE), and Vision Semantics (who works with the UK Ministry of Defence). One [paper](https://pdfs.semanticscholar.org/59f3/57015054bab43fb8cbfd3f3dbf17b1d1f881.pdf) is even jointly published by researchers affiliated with both the University College of London and the National University of Defense Technology in China. -![caption: Duke MTMC camera views for 8 cameras deployed on campus © megapixels.cc](assets/duke_mtmc_cameras.jpg) +| Organization | Paper | Link | Year | Used Duke MTMC | +|---|---|---|---| +| IARPA, IBM, CloudWalk | Horizontal Pyramid Matching for Person Re-identification | [arxiv.org](https://arxiv.org/abs/1804.05275) | 2018 | ✔ | +| Microsoft | ReXCam: Resource-Efficient, Cross-CameraVideo Analytics at Enterprise Scale | [arxiv.org](https://arxiv.org/abs/1811.01268) | 2018 | ✔ | +| Microsoft | Scaling Video Analytics Systems to Large Camera Deployments | [arxiv.org](https://arxiv.org/pdf/1809.02318.pdf) | 2018 | ✔ | +| University College of London, National University of Defense Technology | Unsupervised Multi-Object Detection for Video Surveillance Using Memory-Based RecurrentAttention Networks | [PDF](https://pdfs.semanticscholar.org/59f3/57015054bab43fb8cbfd3f3dbf17b1d1f881.pdf) | 2018 | ✔ | +| Vision Semantics Ltd. | Unsupervised Person Re-identification by Deep Learning Tracklet Association | [arxiv.org](https://arxiv.org/abs/1809.02874) | 2018 | ✔ | +| US Dept. 
of Homeland Security | Re-Identification with Consistent Attentive Siamese Networks | [arxiv.org](https://arxiv.org/abs/1811.07487/) | 2019 | ✔ | + + By some metrics the dataset is considered a huge success. It is regarded as highly influential research and has contributed to hundreds, if not thousands, of projects to advance artificial intelligence for person tracking and monitoring. All the above citations, regardless of which country is using it, align perfectly with the original [intent](http://vision.cs.duke.edu/DukeMTMC/) of the Duke MTMC dataset: "to accelerate advances in multi-target multi-camera tracking". + +The same logic applies to all the new extensions of the Duke MTMC dataset including [Duke MTMC Re-ID](https://github.com/layumi/DukeMTMC-reID_evaluation), [Duke MTMC Video Re-ID](https://github.com/Yu-Wu/DukeMTMC-VideoReID), Duke MTMC Groups, and [Duke MTMC Attribute](https://github.com/vana77/DukeMTMC-attribute). And it also applies to all the new specialized datasets that will be created from Duke MTMC, such as the low-resolution face recognition dataset called [QMUL-SurvFace](https://qmul-survface.github.io/), which was funded in part by [SeeQuestor](https://seequestor.com), a computer vision provider to law enforcement agencies including Scotland Yard and Queensland Police. From the perspective of academic researchers, companies, and defense agencies using these datasets to advance their organization's work, Duke MTMC contributes value to their bottom line. Regardless of who is using these datasets or how they're used, they are simply provided to make networks of surveillance cameras more powerful. ![caption: Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus © megapixels.cc](assets/duke_mtmc_saliencies.jpg)
The dataset files have been distributed throughout the world and it would not be possible to contact all the owners for removal. Nor do the authors provide any options for students to opt out, nor did they even inform students they would be used as test subjects for surveillance research and development in a project funded, in part, by the United States Army Research Office. - #### Notes - The Duke MTMC dataset paper mentions 2,700 identities, but their ground truth file only lists annotations for 1,812 -{% include 'cite_our_work.html' %} - If you use any data from the Duke MTMC, please follow their [license](http://vision.cs.duke.edu/DukeMTMC/#how-to-cite) and cite their work as:
        @@ -94,19 +116,16 @@ If you use any data from the Duke MTMC please follow their [license](http://visi
         }
         
        +{% include 'cite_our_work.html' %} + + #### ToDo - clean up citations, formatting ### Footnotes +[^xinjiang_nyt]: Mozur, Paul. "One Month, 500,000 Face Scans: How China Is Using A.I. to Profile a Minority". https://www.nytimes.com/2019/04/14/technology/china-surveillance-artificial-intelligence-racial-profiling.html. April 14, 2019. [^sensetime_qz]: [^sensenets_uyghurs]: -[^sensenets_sensetime]: "Attention-Aware Compositional Network for Person Re-identification". 2018. [SemanticScholar](https://www.semanticscholar.org/paper/Attention-Aware-Compositional-Network-for-Person-Xu-Zhao/14ce502bc19b225466126b256511f9c05cadcb6e), [PDF](http://openaccess.thecvf.com/content_cvpr_2018/papers/Xu_Attention-Aware_Compositional_Network_CVPR_2018_paper.pdf) -[^sensetime1]: "End-to-End Deep Kronecker-Product Matching for Person Re-identification". 2018. [SemanticScholar](https://www.semanticscholar.org/paper/End-to-End-Deep-Kronecker-Product-Matching-for-Shen-Xiao/947954cafdefd471b75da8c3bb4c21b9e6d57838), [PDF](http://openaccess.thecvf.com/content_cvpr_2018/papers/Shen_End-to-End_Deep_Kronecker-Product_CVPR_2018_paper.pdf) -[^sensetime2]: "Person Re-identification with Deep Similarity-Guided Graph Neural Network". 2018. [SemanticScholar](https://www.semanticscholar.org/paper/Person-Re-identification-with-Deep-Graph-Neural-Shen-Li/08d2a558ea2deb117dd8066e864612bf2899905b) -[^duke_mtmc_orig]: "Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking". 2016. [SemanticScholar](https://www.semanticscholar.org/paper/Performance-Measures-and-a-Data-Set-for-Tracking-Ristani-Solera/27a2fad58dd8727e280f97036e0d2bc55ef5424c) -[^cn_defense1]: "Tracking by Animation: Unsupervised Learning of Multi-Object Attentive Trackers". 2018. [SemanticScholar](https://www.semanticscholar.org/paper/Tracking-by-Animation%3A-Unsupervised-Learning-of-He-Liu/e90816e1a0e14ea1e7039e0b2782260999aef786) -[^cn_defense2]: "Unsupervised Multi-Object Detection for Video Surveillance Using Memory-Based Recurrent Attention Networks". 2018. [SemanticScholar](https://www.semanticscholar.org/paper/Unsupervised-Multi-Object-Detection-for-Video-Using-He-He/59f357015054bab43fb8cbfd3f3dbf17b1d1f881) -[^iarpa_ibm]: "Horizontal Pyramid Matching for Person Re-identification". 2019. [SemanticScholar](https://www.semanticscholar.org/paper/Horizontal-Pyramid-Matching-for-Person-Fu-Wei/c2a5f27d97744bc1f96d7e1074395749e3c59bc8) -[^us_dhs]: "Re-Identification with Consistent Attentive Siamese Networks". 2018. [SemanticScholar](https://www.semanticscholar.org/paper/Re-Identification-with-Consistent-Attentive-Siamese-Zheng-Karanam/24d6d3adf2176516ef0de2e943ce2084e27c4f94) \ No newline at end of file +[^duke_mtmc_orig]: "Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking". 2016. [SemanticScholar](https://www.semanticscholar.org/paper/Performance-Measures-and-a-Data-Set-for-Tracking-Ristani-Solera/27a2fad58dd8727e280f97036e0d2bc55ef5424c) \ No newline at end of file diff --git a/site/public/about/assets/LICENSE/index.html b/site/public/about/assets/LICENSE/index.html index 0d3a7878..66d8b3ac 100644 --- a/site/public/about/assets/LICENSE/index.html +++ b/site/public/about/assets/LICENSE/index.html @@ -40,17 +40,17 @@
      diff --git a/site/public/about/attribution/index.html b/site/public/about/attribution/index.html index 0a1b8e0f..5fe92b8d 100644 --- a/site/public/about/attribution/index.html +++ b/site/public/about/attribution/index.html @@ -60,17 +60,17 @@ To Adapt: To modify, transform and build upon the database

      diff --git a/site/public/about/index.html b/site/public/about/index.html index 4a4ab3c6..b83736d3 100644 --- a/site/public/about/index.html +++ b/site/public/about/index.html @@ -35,7 +35,8 @@
    • Legal / Privacy

    MegaPixels is an independent art and research project by Adam Harvey and Jules LaPlace that investigates the ethics, origins, and individual privacy implications of face recognition image datasets and their role in the expansion of biometric surveillance technologies.


    MegaPixels is made possible with support from Mozilla, our primary funding partner.


    Additional support for MegaPixels is provided by the European ARTificial Intelligence Network (AI LAB) at the Ars Electronica Center, 1-year research-in-residence grant from Karlsruhe HfG, and sales from the Privacy Gift Shop.

    Adam Harvey

    @@ -75,6 +76,11 @@ You are free:
  • PDFMiner.Six and Pandas for research paper data analysis

Please direct questions, comments, or feedback to mastodon.social/@adamhrv


Funding Partners


The MegaPixels website, research, and development are made possible with support from Mozilla, our primary funding partner.


[ add logos ]


Additional support is provided by the European ARTificial Intelligence Network (AI LAB) at the Ars Electronica Center and a 1-year research-in-residence grant from Karlsruhe HfG.


[ add logos ]

Attribution

If you use MegaPixels or any data derived from it for your work, please cite our original work as follows:

@@ -89,17 +95,17 @@ You are free:
 
   
   
 
 
diff --git a/site/public/about/legal/index.html b/site/public/about/legal/index.html
index 9eb5dd5a..ce10014a 100644
--- a/site/public/about/legal/index.html
+++ b/site/public/about/legal/index.html
@@ -90,17 +90,17 @@ To Adapt: To modify, transform and build upon the database

diff --git a/site/public/about/press/index.html b/site/public/about/press/index.html index 7b0a3e87..70caf03c 100644 --- a/site/public/about/press/index.html +++ b/site/public/about/press/index.html @@ -41,17 +41,17 @@ diff --git a/site/public/datasets/50_people_one_question/index.html b/site/public/datasets/50_people_one_question/index.html index dc7919f7..79411122 100644 --- a/site/public/datasets/50_people_one_question/index.html +++ b/site/public/datasets/50_people_one_question/index.html @@ -96,17 +96,17 @@ diff --git a/site/public/datasets/afad/index.html b/site/public/datasets/afad/index.html index f2b0a5ba..7969c1d6 100644 --- a/site/public/datasets/afad/index.html +++ b/site/public/datasets/afad/index.html @@ -109,17 +109,17 @@ Motivation

diff --git a/site/public/datasets/brainwash/index.html b/site/public/datasets/brainwash/index.html index 95f0d77d..becc8949 100644 --- a/site/public/datasets/brainwash/index.html +++ b/site/public/datasets/brainwash/index.html @@ -50,6 +50,7 @@
Website

Brainwash is a head detection dataset created from San Francisco's Brainwash Cafe livecam footage. It includes 11,918 images of the "everyday life of a busy downtown cafe" 1 captured at 100-second intervals throughout the entire day. The Brainwash dataset was captured during 3 days in 2014: October 27, November 13, and November 24. According to the authors' research paper introducing the dataset, the images were acquired with the help of Angelcam.com. 2


People's Liberation Army National University of Defense Science and Technology

Brainwash is not a widely used dataset, but since its publication by Stanford University in 2015 it has notably appeared in several research papers from the National University of Defense Technology in Changsha, China. In 2016 and 2017, researchers there conducted studies on detecting people's heads in crowded scenes for the purpose of surveillance. 3 4

If you happened to be at Brainwash Cafe in San Francisco at any time on October 27, November 13, or November 24 in 2014, you are most likely included in the Brainwash dataset and have unwittingly contributed to surveillance research.

@@ -145,17 +146,17 @@ diff --git a/site/public/datasets/caltech_10k/index.html b/site/public/datasets/caltech_10k/index.html index 04d63ee3..abb55148 100644 --- a/site/public/datasets/caltech_10k/index.html +++ b/site/public/datasets/caltech_10k/index.html @@ -106,17 +106,17 @@ diff --git a/site/public/datasets/celeba/index.html b/site/public/datasets/celeba/index.html index c72f3798..a4a7efa2 100644 --- a/site/public/datasets/celeba/index.html +++ b/site/public/datasets/celeba/index.html @@ -108,17 +108,17 @@ diff --git a/site/public/datasets/cofw/index.html b/site/public/datasets/cofw/index.html index eef8cf5e..c6d7417e 100644 --- a/site/public/datasets/cofw/index.html +++ b/site/public/datasets/cofw/index.html @@ -161,17 +161,17 @@ To increase the number of training images, and since COFW has the exact same la diff --git a/site/public/datasets/duke_mtmc/index.html b/site/public/datasets/duke_mtmc/index.html index 5cb6fb0c..48c90d66 100644 --- a/site/public/datasets/duke_mtmc/index.html +++ b/site/public/datasets/duke_mtmc/index.html @@ -46,13 +46,167 @@
Website

[ page under development ]


Duke MTMC (Multi-Target, Multi-Camera Tracking) is a dataset of video recorded on Duke University campus for research and development of networked camera surveillance systems. MTMC tracking algorithms are used for citywide dragnet surveillance systems such as those used throughout China by SenseTime 1 and the oppressive monitoring of 2.5 million Uyghurs in Xinjiang by SenseNets 2. In fact, researchers from both SenseTime 4 5 and SenseNets 3 used the Duke MTMC dataset for their research.


In this investigation into the Duke MTMC dataset, we found that researchers at Duke University in Durham, North Carolina, captured over 2,000 students, faculty members, and passersby into one of the most prolific public surveillance research datasets in use around the world by commercial and defense surveillance organizations.

-

Since its publication in 2016, the Duke MTMC dataset has been used in over 100 studies at organizations around the world including SenseTime 4 5, SenseNets 3, IARPA and IBM 9, China's National University of Defense Technology 7 8, the US Department of Homeland Security 10, Tencent, Microsoft, Microsoft Asia, Fraunhofer, Senstar Corp., Alibaba, Naver Labs, Google, and Hewlett-Packard Labs, to name only a few.

-

The creation of the Duke MTMC dataset in 2014 and its publication in 2016 were originally funded by the U.S. Army Research Laboratory and the National Science Foundation 6. Yet our analysis of the geographic locations of the publicly available research shows more than twice as many citations by researchers from China as from the United States (44% China, 20% United States). In 2018 alone, there were 70 research project citations from China.

-
A collection of 1,600 out of the 2,700 students and passersby captured into the Duke MTMC surveillance research and development dataset. These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification. Open Data Commons Attribution License.

The 8 cameras deployed on Duke's campus were specifically set up to capture students "during periods between lectures, when pedestrian traffic is heavy" 6. Camera 5 was positioned to capture students as they entered and exited the university's main chapel. Each camera's location and approximate field of view are shown below. The heat map visualization shows the locations where pedestrians were most frequently annotated in each video from the Duke MTMC dataset.

-
Duke MTMC camera locations on Duke University campus. Open Data Commons Attribution License.
Duke MTMC camera views for 8 cameras deployed on campus © megapixels.cc
Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus © megapixels.cc
+

Duke MTMC (Multi-Target, Multi-Camera) is a dataset of surveillance video footage taken on Duke University's campus in 2014 and is used for research and development of video tracking systems, person re-identification, and low-resolution facial recognition. The dataset contains over 14 hours of synchronized surveillance video from 8 cameras at 1080p and 60 FPS, with over 2 million frames of 2,000 students walking to and from classes. The 8 surveillance cameras deployed on campus were specifically set up to capture students "during periods between lectures, when pedestrian traffic is heavy" 4.
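As a quick sanity check on those figures, the frame count follows directly from the reported footage length and frame rate (a minimal sketch; the 14-hour and 60 FPS values are the ones stated above, and Python is used only for illustration):

# Rough sanity check: total frames implied by the reported
# footage length and frame rate of the Duke MTMC videos.
hours = 14   # total synchronized footage reported
fps = 60     # reported frame rate
frames = hours * 3600 * fps
print(f"{frames:,} frames")  # 3,024,000 -- consistent with "over 2 million frames"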

+

In this investigation into the Duke MTMC dataset we tracked down over 100 publicly available research papers that explicitly acknowledged using Duke MTMC. Our analysis shows that the dataset has spread far beyond its origins and intentions in academic research projects at Duke University. Since its publication in 2016, more than twice as many research citations originated in China as in the United States. Among these citations were papers with explicit and direct links to the Chinese military and several of the companies known to provide Chinese authorities with the oppressive surveillance technology used to monitor millions of Uighur Muslims.

+

In one 2018 paper jointly published by researchers from SenseNets and SenseTime (and funded by SenseTime Group Limited), entitled Attention-Aware Compositional Network for Person Re-identification, the Duke MTMC dataset was used for "extensive experiments" on improving person re-identification across multiple surveillance cameras, with important applications in "finding missing elderly and children, and suspect tracking, etc." Both SenseNets and SenseTime have been directly linked to providing surveillance technology used to monitor Uighur Muslims in China. 2 3 1

+
A collection of 1,600 out of the approximately 2,000 students and pedestrians in the Duke MTMC dataset. These students were also included in the Duke MTMC Re-ID dataset extension used for person re-identification, and eventually the QMUL SurvFace face recognition dataset. Open Data Commons Attribution License.

Despite repeated warnings by Human Rights Watch that the authoritarian surveillance used in China represents a violation of human rights, researchers at Duke University continued to provide open access to their dataset for anyone to use for any project. As the surveillance crisis in China grew, so did the number of citations with links to organizations complicit in the crisis. In 2018 alone there were over 70 research projects happening in China that publicly acknowledged benefiting from the Duke MTMC dataset. Amongst these were projects from SenseNets, SenseTime, CloudWalk, Megvii, Beihang University, and the PLA's National University of Defense Technology.

Organization | Paper | Link | Year
SenseNets, SenseTime | Attention-Aware Compositional Network for Person Re-identification | SemanticScholar | 2018
SenseTime | End-to-End Deep Kronecker-Product Matching for Person Re-identification | thecvf.com | 2018
CloudWalk | Horizontal Pyramid Matching for Person Re-identification | arxiv.org | 20xx
Megvii | Multi-Target, Multi-Camera Tracking by Hierarchical Clustering: Recent Progress on DukeMTMC Project | SemanticScholar | 2018
Megvii | Person Re-Identification (slides) | github.io | 2017
Megvii | SCPNet: Spatial-Channel Parallelism Network for Joint Holistic and Partial Person Re-Identification | arxiv.org | 2018
CloudWalk | CloudWalk re-identification technology extends facial biometric tracking with improved accuracy | BiometricUpdate.com | 2018
CloudWalk | Horizontal Pyramid Matching for Person Re-identification | arxiv.org | 2018
National University of Defense Technology | Tracking by Animation: Unsupervised Learning of Multi-Object Attentive Trackers | SemanticScholar.org | 2018
National University of Defense Technology | Unsupervised Multi-Object Detection for Video Surveillance Using Memory-Based Recurrent Attention Networks | SemanticScholar.org | 2018
Beihang University | Orientation-Guided Similarity Learning for Person Re-identification | ieee.org | 2018
Beihang University | Online Inter-Camera Trajectory Association Exploiting Person Re-Identification and Camera Topology | acm.org | 2018

The reasons that companies in China use the Duke MTMC dataset for research are technically no different from the reasons it is used in the United States and Europe. In fact, the original creators of the dataset published a follow-up report in 2017 titled Tracking Social Groups Within and Across Cameras, with specific applications to "automated analysis of crowds and social gatherings for surveillance and security applications". Their work, as well as the creation of the original dataset in 2014, was supported in part by the United States Army Research Laboratory.

+

Citations from the United States and Europe show a similar trend to that in China, including publicly acknowledged and verified usage of the Duke MTMC dataset supported or carried out by the United States Department of Homeland Security, IARPA, IBM, Microsoft (which provides surveillance technology to ICE), and Vision Semantics (which works with the UK Ministry of Defence). One paper was even jointly published by researchers affiliated with both University College London and the National University of Defense Technology in China.

Organization | Paper | Link | Year
IARPA, IBM, CloudWalk | Horizontal Pyramid Matching for Person Re-identification | arxiv.org | 2018
Microsoft | ReXCam: Resource-Efficient, Cross-Camera Video Analytics at Enterprise Scale | arxiv.org | 2018
Microsoft | Scaling Video Analytics Systems to Large Camera Deployments | arxiv.org | 2018
University College London, National University of Defense Technology | Unsupervised Multi-Object Detection for Video Surveillance Using Memory-Based Recurrent Attention Networks | PDF | 2018
Vision Semantics Ltd. | Unsupervised Person Re-identification by Deep Learning Tracklet Association | arxiv.org | 2018
US Dept. of Homeland Security | Re-Identification with Consistent Attentive Siamese Networks | arxiv.org | 2019

By some metrics the dataset is considered a huge success. It is regarded as highly influential research and has contributed to hundreds, if not thousands, of projects to advance artificial intelligence for person tracking and monitoring. All the above citations, regardless of which country is using it, align perfectly with the original intent of the Duke MTMC dataset: "to accelerate advances in multi-target multi-camera tracking".

+

The same logic applies to all the new extensions of the Duke MTMC dataset, including Duke MTMC Re-ID, Duke MTMC Video Re-ID, Duke MTMC Groups, and Duke MTMC Attribute. It also applies to all the new specialized datasets that will be created from Duke MTMC, such as the low-resolution face recognition dataset called QMUL-SurvFace, which was funded in part by SeeQuestor, a computer vision provider to law enforcement agencies including Scotland Yard and Queensland Police. From the perspective of academic researchers, companies, and defense agencies using these datasets to advance their organization's work, Duke MTMC contributes value to their bottom line. Regardless of who is using these datasets or how they're used, they are simply provided to make networks of surveillance cameras more powerful.

+
Duke MTMC pedestrian detection saliency maps for 8 cameras deployed on campus © megapixels.cc

But from a privacy and human rights perspective, the creation and distribution of the Duke MTMC dataset illustrates an egregious prioritization of surveillance technologies over individual rights, where the simple act of going to class could implicate your biometric data in a surveillance training dataset.

+

For the approximately 2,000 students in the Duke MTMC dataset there is unfortunately no escape. It would be impossible to remove oneself from all copies of the dataset downloaded around the world. Instead, over 2,000 students and visitors who happened to be walking to class on March 13, 2014 will forever remain in all downloaded copies of the Duke MTMC dataset and all its extensions, contributing to a global supply chain of data that powers governmental and commercial expansion of biometric surveillance technologies.

+

Who used the Duke MTMC dataset?

@@ -112,10 +266,7 @@

Supplementary Information

-

Funding

-

Original funding for the Duke MTMC dataset was provided by the Army Research Office under Grant No. W911NF-10-1-0387 and by the National Science Foundation under Grants IIS-10-17017 and IIS-14-20894.

-

Video Timestamps

+
Duke MTMC camera locations on Duke University campus. Open Data Commons Attribution License.
Duke MTMC camera views for 8 cameras deployed on campus © megapixels.cc

Video Timestamps

The video timestamps contain the likely, but not yet confirmed, dates and times of capture. Because the video timestamps align with the start and stop time sync data provided by the researchers, they at least confirm the relative timing. The rainy weather on that day also contributes toward the likelihood of March 14, 2014.
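A minimal sketch of this kind of cross-check, assuming the sync data is provided as per-camera start-frame offsets on a shared 60 FPS timeline; the offsets and the candidate start time below are hypothetical placeholders, not values taken from the dataset:

from datetime import datetime, timedelta

FPS = 60
# Hypothetical per-camera start-frame offsets on the shared timeline;
# the real values would come from the researchers' sync data.
sync_offsets = {1: 5543, 2: 3607, 3: 27244}

# Candidate wall-clock start of the shared timeline, e.g. read from an
# on-screen timestamp overlay (date unconfirmed).
t0 = datetime(2014, 3, 14, 16, 19, 0)

for cam, offset in sync_offsets.items():
    start = t0 + timedelta(seconds=offset / FPS)
    print(f"camera {cam} starts recording at {start}")

# If the overlay timestamps visible in each video match these derived
# times, the relative timing is confirmed even if the absolute date is not.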

@@ -187,13 +338,19 @@ under Grants IIS-10-17017 and IIS-14-20894.

-

Opting Out

-

If you attended Duke University and were captured by any of the 8 surveillance cameras positioned on campus in 2014, there is unfortunately no way to be removed. The dataset files have been distributed throughout the world and it would not be possible to contact all the owners for removal. Nor do the authors provide any option for students to opt out, nor did they even inform students that they would be used as test subjects for surveillance research and development in a project funded, in part, by the United States Army Research Office.

-

Notes

+

Notes

  • The Duke MTMC dataset paper mentions 2,700 identities, but their ground truth file only lists annotations for 1,812 (a sketch for verifying this count follows below)
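A minimal sketch of how that count could be verified, assuming the ground truth ships as a MATLAB .mat file with one row per annotation and the identity in the second column; the file name, variable name, and column index are assumptions, not confirmed against the dataset's documentation:

import numpy as np
from scipy.io import loadmat

# Assumed row layout: [camera, ID, frame, ...]; adjust the key and the
# column index to match the actual ground truth schema.
gt = loadmat("trainval.mat")["trainData"]
ids = np.unique(gt[:, 1].astype(int))
print(f"{len(ids)} unique identities annotated")  # reportedly 1,812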
-
+

If you use any data from the Duke MTMC dataset, please follow their license and cite their work as:

+
+@inproceedings{ristani2016MTMC,
+ title =        {Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking},
+ author =       {Ristani, Ergys and Solera, Francesco and Zou, Roger and Cucchiara, Rita and Tomasi, Carlo},
+ booktitle =    {European Conference on Computer Vision workshop on Benchmarking Multi-Target Tracking},
+ year =         {2016}
+}
+

Cite Our Work

@@ -210,43 +367,29 @@ under Grants IIS-10-17017 and IIS-14-20894.

}

-

If you use any data from the Duke MTMC please follow their license and cite their work as:

-
-@inproceedings{ristani2016MTMC,
- title =        {Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking},
- author =       {Ristani, Ergys and Solera, Francesco and Zou, Roger and Cucchiara, Rita and Tomasi, Carlo},
- booktitle =    {European Conference on Computer Vision workshop on Benchmarking Multi-Target Tracking},
- year =         {2016}
-}
-

ToDo

+

ToDo

  • clean up citations, formatting
-

References

References

diff --git a/site/public/datasets/feret/index.html b/site/public/datasets/feret/index.html index 387826b0..7f9ed94c 100644 --- a/site/public/datasets/feret/index.html +++ b/site/public/datasets/feret/index.html @@ -119,17 +119,17 @@ diff --git a/site/public/datasets/hrt_transgender/index.html b/site/public/datasets/hrt_transgender/index.html index 6b9ae7be..4e566a4a 100644 --- a/site/public/datasets/hrt_transgender/index.html +++ b/site/public/datasets/hrt_transgender/index.html @@ -49,17 +49,17 @@ diff --git a/site/public/datasets/index.html b/site/public/datasets/index.html index 75961089..5c8e2546 100644 --- a/site/public/datasets/index.html +++ b/site/public/datasets/index.html @@ -28,7 +28,7 @@

Facial Recognition Datasets

-

Explore publicly available facial recognition datasets. More datasets will be added throughout 2019.

+

Explore publicly available facial recognition datasets feeding into research and development of biometric surveillance technologies at the largest technology companies and defense contractors in the world.

@@ -115,17 +115,17 @@ diff --git a/site/public/datasets/lfpw/index.html b/site/public/datasets/lfpw/index.html index 45de2599..a9eb025d 100644 --- a/site/public/datasets/lfpw/index.html +++ b/site/public/datasets/lfpw/index.html @@ -98,17 +98,17 @@ diff --git a/site/public/datasets/lfw/index.html b/site/public/datasets/lfw/index.html index 7997629f..ff7a3cd9 100644 --- a/site/public/datasets/lfw/index.html +++ b/site/public/datasets/lfw/index.html @@ -148,17 +148,17 @@ diff --git a/site/public/datasets/market_1501/index.html b/site/public/datasets/market_1501/index.html index 7c545335..05750dc7 100644 --- a/site/public/datasets/market_1501/index.html +++ b/site/public/datasets/market_1501/index.html @@ -114,17 +114,17 @@ organization={Springer} diff --git a/site/public/datasets/msceleb/index.html b/site/public/datasets/msceleb/index.html index 8b070118..84c62bd2 100644 --- a/site/public/datasets/msceleb/index.html +++ b/site/public/datasets/msceleb/index.html @@ -123,17 +123,17 @@ diff --git a/site/public/datasets/oxford_town_centre/index.html b/site/public/datasets/oxford_town_centre/index.html index b48efe3e..2c7c26fc 100644 --- a/site/public/datasets/oxford_town_centre/index.html +++ b/site/public/datasets/oxford_town_centre/index.html @@ -138,17 +138,17 @@ diff --git a/site/public/datasets/pipa/index.html b/site/public/datasets/pipa/index.html index 6c920b46..ae8aef6d 100644 --- a/site/public/datasets/pipa/index.html +++ b/site/public/datasets/pipa/index.html @@ -102,17 +102,17 @@ diff --git a/site/public/datasets/pubfig/index.html b/site/public/datasets/pubfig/index.html index e81e12bc..ef289954 100644 --- a/site/public/datasets/pubfig/index.html +++ b/site/public/datasets/pubfig/index.html @@ -99,17 +99,17 @@ diff --git a/site/public/datasets/uccs/index.html b/site/public/datasets/uccs/index.html index 32f7cdb2..9347d536 100644 --- a/site/public/datasets/uccs/index.html +++ b/site/public/datasets/uccs/index.html @@ -51,12 +51,11 @@

UnConstrained College Students (UCCS) is a dataset of long-range surveillance photos captured at the University of Colorado Colorado Springs, developed primarily for research and development of "face detection and recognition research towards surveillance applications" 1. According to the authors of two papers associated with the dataset, over 1,700 students and pedestrians were "photographed using a long-range high-resolution surveillance camera without their knowledge". 3 In this investigation, we examine the contents of the dataset, funding sources, photo EXIF data, and information from publicly available research project citations.

The UCCS dataset includes over 1,700 unique identities, most of which are students walking to and from class. As of 2018, it was the "largest surveillance [face recognition] benchmark in the public domain." 4 The photos were taken during the spring semesters of 2012 – 2013 on the West Lawn of the University of Colorado Colorado Springs campus. The photographs were timed to capture students during breaks between their scheduled classes in the morning and afternoon, Monday through Thursday. "For example, a student taking Monday-Wednesday classes at 12:30 PM will show up in the camera on almost every Monday and Wednesday." 2

-
Example images from the UnConstrained College Students Dataset.

The long-range surveillance images in the UnConstrained College Students dataset were captured using a Canon 7D 18-megapixel digital camera fitted with a Sigma 800mm F5.6 EX APO DG HSM telephoto lens and pointed out an office window across the university's West Lawn. The students were photographed from a distance of approximately 150 meters. "The camera [was] programmed to start capturing images at specific time intervals between classes to maximize the number of faces being captured." 2 Their setup made it impossible for students to know they were being photographed, providing the researchers with realistic surveillance images to help build face detection and recognition systems for real-world applications in defense, intelligence, and commercial settings.

-
The location at University of Colorado Colorado Springs where students were surreptitiously photographed with a long-range surveillance camera for use in a defense and intelligence agency funded research project on face recognition. Image: Google Maps

In the two papers associated with the release of the UCCS dataset (Unconstrained Face Detection and Open-Set Face Recognition Challenge and Large Scale Unconstrained Open Set Face Database), the researchers disclosed their funding sources as ODNI (Office of the Director of National Intelligence), IARPA (Intelligence Advanced Research Projects Activity), ONR MURI (Office of Naval Research and Department of Defense Multidisciplinary University Research Initiative), Army SBIR (Small Business Innovation Research), SOCOM SBIR (Special Operations Command Small Business Innovation Research), and the National Science Foundation. Further, UCCS's VAST site explicitly states they are part of IARPA Janus, a face recognition project developed to serve the needs of national intelligence interests.

-

The EXIF data embedded in the images shows that the photo capture times follow a similar pattern, but also highlights that the vast majority of photos (over 7,000) were taken on Tuesdays around noon during students' lunch break. The lack of any photos taken on Friday shows that the researchers were only interested in capturing images of students.

-
UCCS photos captured per weekday © megapixels.cc

The two research papers associated with the release of the UCCS dataset (Unconstrained Face Detection and Open-Set Face Recognition Challenge and Large Scale Unconstrained Open Set Face Database) acknowledge that the primary funding sources for their work were United States defense and intelligence agencies. Specifically, development of the UnConstrained College Students dataset was funded by the Intelligence Advanced Research Projects Activity (IARPA), the Office of the Director of National Intelligence (ODNI), the Office of Naval Research and Department of Defense Multidisciplinary University Research Initiative (ONR MURI), Small Business Innovation Research (SBIR), Special Operations Command Small Business Innovation Research (SOCOM SBIR), and the National Science Foundation. Further, UCCS's VAST site explicitly states they are part of IARPA Janus, a face recognition project developed to serve the needs of national intelligence interests, clearly establishing that the funding sources and immediate benefactors of this dataset are United States defense and intelligence agencies.

-

Although the images were first captured in 2012 – 2013, the dataset was not publicly released until 2016. Then in 2017 the UCCS face dataset formed the basis for a defense and intelligence agency funded face recognition challenge at the International Joint Biometrics Conference in Denver, CO. And in 2018 the dataset was again used for the 2nd Unconstrained Face Detection and Open Set Recognition Challenge at the European Conference on Computer Vision (ECCV) in Munich, Germany.

+
Example images from the UnConstrained College Students Dataset.

The long-range surveillance images in the UnConstrained College Students dataset were taken using a Canon 7D 18-megapixel digital camera fitted with a Sigma 800mm F5.6 EX APO DG HSM telephoto lens and pointed out an office window across the university's West Lawn. The students were photographed from a distance of approximately 150 meters. "The camera [was] programmed to start capturing images at specific time intervals between classes to maximize the number of faces being captured." 2 Their setup made it impossible for students to know they were being photographed, providing the researchers with realistic surveillance images to help build face recognition systems for real-world applications in the defense, intelligence, and commercial sectors.
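To see why an 800mm lens at 150 meters yields usable face images, a back-of-the-envelope projection helps (a sketch assuming the Canon 7D's published sensor specifications and a nominal 16 cm face width; these figures are approximations, not taken from the papers):

# Approximate on-sensor size of a face at 150 m through an 800 mm lens.
focal_length = 0.8      # m (800 mm lens)
distance = 150.0        # m (reported subject distance)
sensor_width = 0.0223   # m (Canon 7D APS-C sensor, ~22.3 mm wide)
sensor_px = 5184        # Canon 7D horizontal pixel count
face_width = 0.16       # m, nominal face width (assumption)

pixel_pitch = sensor_width / sensor_px             # ~4.3 micrometers
image_size = face_width * focal_length / distance  # thin-lens projection
print(f"~{image_size / pixel_pitch:.0f} px across")  # roughly 200 px

At roughly 200 pixels across a face, the images far exceed what most face detection and recognition pipelines require, which helps explain why this setup produced research-grade surveillance imagery.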

+
The location at University of Colorado Colorado Springs where students were surreptitiously photographed with a long-range surveillance camera for use in a defense and intelligence agency funded research project on face recognition. Image: Google Maps

The EXIF data embedded in the images shows that the photo capture times follow a similar pattern, but also highlights that the vast majority of photos (over 7,000) were taken on Tuesdays around noon during students' lunch break. The lack of any photos taken on Friday shows that the researchers were only interested in capturing images of students.

+
UCCS photos captured per weekday © megapixels.cc
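The weekday pattern described above can be reproduced from the images themselves; a minimal sketch, assuming the photos are JPEGs whose EXIF DateTimeOriginal tags survived distribution (the directory path is a placeholder):

from collections import Counter
from datetime import datetime
from pathlib import Path

from PIL import Image  # Pillow

counts = Counter()
for path in Path("uccs_images").glob("*.jpg"):  # placeholder path
    exif = Image.open(path)._getexif() or {}
    stamp = exif.get(36867)  # EXIF tag 36867 = DateTimeOriginal
    if stamp:
        day = datetime.strptime(stamp, "%Y:%m:%d %H:%M:%S").strftime("%A")
        counts[day] += 1

for day, n in counts.most_common():
    print(f"{day}: {n}")  # Tuesdays should dominate, per the analysis above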

The two research papers associated with the release of the UCCS dataset (Unconstrained Face Detection and Open-Set Face Recognition Challenge and Large Scale Unconstrained Open Set Face Database) acknowledge that the primary funding sources for their work were United States defense and intelligence agencies. Specifically, development of the UnConstrained College Students dataset was funded by the Intelligence Advanced Research Projects Activity (IARPA), the Office of the Director of National Intelligence (ODNI), the Office of Naval Research and Department of Defense Multidisciplinary University Research Initiative (ONR MURI), and the Special Operations Command Small Business Innovation Research (SOCOM SBIR), amongst others. UCCS's VAST site also explicitly states that they are part of IARPA Janus, a face recognition project developed to serve the needs of national intelligence interests, clearly establishing that the funding sources and immediate benefactors of this dataset are United States defense and intelligence agencies.

+

Although the images were first captured in 2012 – 2013, the dataset was not publicly released until 2016. In 2017 the UCCS face dataset formed the basis for a defense and intelligence agency funded face recognition challenge at the International Joint Biometrics Conference in Denver, CO. And in 2018 the dataset was again used for the 2nd Unconstrained Face Detection and Open Set Recognition Challenge at the European Conference on Computer Vision (ECCV) in Munich, Germany.

As of April 15, 2019, the UCCS dataset is no longer available for public download. But during the three years it was publicly available (2016 – 2019), the UCCS dataset appeared in at least 6 published research papers, including verified usage by Beihang University, which is known to provide research and development for China's military.

Who used UCCS?

@@ -259,17 +258,17 @@ Their setup made it impossible for students to know they were being photographed diff --git a/site/public/datasets/vgg_face2/index.html b/site/public/datasets/vgg_face2/index.html index a9d318f1..24ce4b2d 100644 --- a/site/public/datasets/vgg_face2/index.html +++ b/site/public/datasets/vgg_face2/index.html @@ -124,17 +124,17 @@ diff --git a/site/public/datasets/viper/index.html b/site/public/datasets/viper/index.html index bc4ddd3d..e4b2a05a 100644 --- a/site/public/datasets/viper/index.html +++ b/site/public/datasets/viper/index.html @@ -104,17 +104,17 @@ diff --git a/site/public/datasets/youtube_celebrities/index.html b/site/public/datasets/youtube_celebrities/index.html index 69b3a02e..e90b45cb 100644 --- a/site/public/datasets/youtube_celebrities/index.html +++ b/site/public/datasets/youtube_celebrities/index.html @@ -95,17 +95,17 @@ the views of our sponsors. diff --git a/site/public/info/index.html b/site/public/info/index.html index 749c29ba..7e7ecf80 100644 --- a/site/public/info/index.html +++ b/site/public/info/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/research/00_introduction/index.html b/site/public/research/00_introduction/index.html index 353e3270..535958cc 100644 --- a/site/public/research/00_introduction/index.html +++ b/site/public/research/00_introduction/index.html @@ -83,17 +83,17 @@ diff --git a/site/public/research/01_from_1_to_100_pixels/index.html b/site/public/research/01_from_1_to_100_pixels/index.html index 9426ef0f..fe49e998 100644 --- a/site/public/research/01_from_1_to_100_pixels/index.html +++ b/site/public/research/01_from_1_to_100_pixels/index.html @@ -121,17 +121,17 @@ relying on FaceID and TouchID to protect their information agree to a

diff --git a/site/public/research/02_what_computers_can_see/index.html b/site/public/research/02_what_computers_can_see/index.html index 920f78cc..d139e83e 100644 --- a/site/public/research/02_what_computers_can_see/index.html +++ b/site/public/research/02_what_computers_can_see/index.html @@ -292,17 +292,17 @@ Head top

diff --git a/site/public/research/index.html b/site/public/research/index.html index 1be8203f..0386fa99 100644 --- a/site/public/research/index.html +++ b/site/public/research/index.html @@ -31,17 +31,17 @@ diff --git a/site/public/test/chart/index.html b/site/public/test/chart/index.html index e882ecc5..05081cf5 100644 --- a/site/public/test/chart/index.html +++ b/site/public/test/chart/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/test/citations/index.html b/site/public/test/citations/index.html index a8af41df..36021752 100644 --- a/site/public/test/citations/index.html +++ b/site/public/test/citations/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/test/csv/index.html b/site/public/test/csv/index.html index 2c2242b4..301ed718 100644 --- a/site/public/test/csv/index.html +++ b/site/public/test/csv/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/test/datasets/index.html b/site/public/test/datasets/index.html index bf08418f..58555895 100644 --- a/site/public/test/datasets/index.html +++ b/site/public/test/datasets/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/test/face_search/index.html b/site/public/test/face_search/index.html index 75bb907b..e2db70df 100644 --- a/site/public/test/face_search/index.html +++ b/site/public/test/face_search/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/test/gallery/index.html b/site/public/test/gallery/index.html index 8958f369..869c3aaa 100644 --- a/site/public/test/gallery/index.html +++ b/site/public/test/gallery/index.html @@ -50,17 +50,17 @@ diff --git a/site/public/test/index.html b/site/public/test/index.html index e660bb2d..9c15d431 100644 --- a/site/public/test/index.html +++ b/site/public/test/index.html @@ -43,17 +43,17 @@ diff --git a/site/public/test/map/index.html b/site/public/test/map/index.html index 21229ec1..ba2756ae 100644 --- a/site/public/test/map/index.html +++ b/site/public/test/map/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/test/name_search/index.html b/site/public/test/name_search/index.html index b0bdb86f..c956ff0b 100644 --- a/site/public/test/name_search/index.html +++ b/site/public/test/name_search/index.html @@ -32,17 +32,17 @@ diff --git a/site/public/test/pie_chart/index.html b/site/public/test/pie_chart/index.html index 98a89ff4..2e3ba39c 100644 --- a/site/public/test/pie_chart/index.html +++ b/site/public/test/pie_chart/index.html @@ -32,17 +32,17 @@ -- cgit v1.2.3-70-g09d2