author    jules@lens <julescarbon@gmail.com>  2019-05-03 16:01:54 +0200
committer jules@lens <julescarbon@gmail.com>  2019-05-03 16:01:54 +0200
commit    8b0408ab56c687352228e8ec50a71ad48bdd6d18 (patch)
tree      2c0c170fd2b4133ae24990f7d322e67976e82311 /scraper/reports
parent    479a23a67a8f398a0ba554dd6f423660c3a037f0 (diff)
build citations
Diffstat (limited to 'scraper/reports')
-rw-r--r--  scraper/reports/doi_institutions_geocoded.csv        4
-rw-r--r--  scraper/reports/paper_title_report.html              2
-rw-r--r--  scraper/reports/paper_title_report_no_location.html  2
-rw-r--r--  scraper/reports/report_coverage.html                 2
-rw-r--r--  scraper/reports/report_index.html                    2
5 files changed, 6 insertions, 6 deletions
diff --git a/scraper/reports/doi_institutions_geocoded.csv b/scraper/reports/doi_institutions_geocoded.csv
index 0653dfaf..1334538e 100644
--- a/scraper/reports/doi_institutions_geocoded.csv
+++ b/scraper/reports/doi_institutions_geocoded.csv
@@ -2590,7 +2590,7 @@ f180db1f0216c097ed9d669ea69e9d3eddd8eb8a,Distributed eigenfaces for massive face
05dc1d27ade984a1d85e104b11aa3380fcd0d8ad,Establishing Point Correspondence of 3D Faces Via Sparse Facial Deformable Model,Zhejiang University,Zhejiang University,"浙江大学之江校区, 之江路, 转塘街道, 西湖区 (Xihu), 杭州市 Hangzhou, 浙江省, 310008, 中国",30.19331415,120.11930822,edu,,China
0547c44cb896e1cc38130ae8cc6b04dc21179045,Fast-Match: Fast Affine Template Matching,Tel Aviv University,Tel Aviv University,"אוניברסיטת תל אביב, כיכר מנדל, תל אביב - יפו, אפקה, תל אביב-יפו, מחוז תל אביב, NO, ישראל",32.11198890,34.80459702,edu,,Israel
05b9c1ad0cfe1bdb68f470492c9a593bf78d5192,Temporal Localization of Actions with Actoms,INRIA Grenoble,"INRIA Grenoble Rhone-Alpes, FRANCE","INRIA, 655, Avenue de l'Europe, Innovallée Montbonnot, Montbonnot-Saint-Martin, Grenoble, Isère, Auvergne-Rhône-Alpes, France métropolitaine, 38330, France",45.21829860,5.80703193,edu,,France
-05ab005c19a7c06c2570727dedf1d77df41b4884,A Multi-Task Learning CNN for Image Steganalysis,"Guangdong Police College Guangzhou, Guangdong, China","Faculty of Forensic Science and Technology, Guangdong Police College Guangzhou, Guangdong, P. R. China","500 Binjiang E Rd, BinJiang Lu, Haizhu Qu, Guangzhou Shi, Guangdong Sheng, China",23.10468800,113.28799600,company,,China
+05ab005c19a7c06c2570727dedf1d77df41b4884,A Multi-Task Learning CNN for Image Steganalysis,"Guangdong Police College Guangzhou, Guangdong, China","Faculty of Forensic Science and Technology, Guangdong Police College Guangzhou, Guangdong, P. R. China","500 Binjiang E Rd, BinJiang Lu, Haizhu Qu, Guangzhou Shi, Guangdong Sheng, China",23.10468800,113.28799600,gov,,China
05287cbad6093deffe9a0fdb9115605595dfeaf0,yBRIEF: A study of non-Gaussian Binary Elementary Features,"Neptec Design Group Ltd, Kanata, Ontario K2K 1Y5, Canada","Neptec Design Group Ltd, Kanata, Ontario K2K 1Y5, Canada","302 Legget Dr, Kanata, ON K2K 1Y5, Canada",45.34141540,-75.90070440,edu,,Canada
05785cb0dcaace54801aa486d4f8fdad3245b27a,Novel generative model for facial expressions based on statistical shape analysis of landmarks trajectories,"CRIStAL UMR, France","T&#x00E9;l&#x00E9;com Lille, CRIStAL UMR (CNRS 9189), France","Lille, France",50.62925000,3.05725600,edu,,France
0599c0c33a99d6d261ed7e93a7f727ba8bfe0e0c,Computationally efficient scene categorization in complex dynamic environments,"Army Research Laboratory, Adelphi, Maryland, United States of America","Army Research Laboratory, Adelphi, Maryland, United States of America","2800 Powder Mill Rd, Adelphi, MD 20783, USA",39.02985870,-76.96380270,edu,,United States
@@ -6222,7 +6222,7 @@ f7dbb15ed72d1282445178dab3368d7676763aa6,On multi-modal people tracking from mob
7549fbaa8911c499ffd0d1ad9013a72eee5d95cc,Recognizing planar kinematic mechanisms from a single image using evolutionary computation,Carnegie Mellon University - Silicon Valley,"Carnegie Mellon University - Silicon Valley, Moffett Field, CA, USA","NASA Research Park, Building 23 Moffett Field, CA 94035, United States",37.41043000,-122.05975300,edu,,United States
75410eb80800f8b51b555da7d61b03b3fe58cc47,Single shot object detection with top-down refinement,Tsinghua University,Tsinghua University,"清华大学, 30, 双清路, 五道口, 后八家, 海淀区, 100084, 中国",40.00229045,116.32098908,edu,,China
75a1406d26bc6faef8ffa3da00fdf6b7621b7754,Reliable scale estimation and correction for monocular Visual Odometry,Australian National University,Australian National University,"Canberra ACT 0200, Australia",-35.27769990,149.11852700,edu,,Australia
-7587a09d924cab41822a07cd1a988068b74baabb,Image scoring: Patch based CNN model for small or medium dataset,"Sichuan Police College, Luzhou, China","Sichuan Police College, Luzhou, China","China, Sichuan Sheng, Luzhou Shi, Jiangyang Qu, 江阳西路34号",28.87451300,105.43182700,edu,,China
+7587a09d924cab41822a07cd1a988068b74baabb,Image scoring: Patch based CNN model for small or medium dataset,"Sichuan Police College, Luzhou, China","Sichuan Police College, Luzhou, China","China, Sichuan Sheng, Luzhou Shi, Jiangyang Qu, 江阳西路34号",28.87451300,105.43182700,gov,,China
75b84ce3900558621af0b83ed5d413542014d911,Vision-based detection and pose estimation for formation of micro aerial vehicles,National University of Singapore,National University of Singapore,"NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore",1.29620180,103.77689944,edu,,Singapore
75b51140d08acdc7f0af11b0ffa1edb40ebbd059,Selecting discriminant eigenfaces by using binary feature selection,Shanghai Jiao Tong University,Shanghai Jiao Tong University,"上海交通大学(徐汇校区), 淮海西路, 番禺小区, 平阴桥, 徐汇区, 上海市, 200052, 中国",31.20081505,121.42840681,edu,,China
75b60cdbaa2ab77b169ed0d1e478ebff07468ef8,Multiscale Logarithm Difference Edgemaps for Face Recognition Against Varying Lighting Conditions,JiaYing University,"JiaYing University, Meizhou, China","China, Guangdong, Meizhou, Meijiang, 梅松路100号",24.32538700,116.12873200,edu,,China
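Both hunks above make the same one-field correction: the institution-type column for two police colleges (Guangdong Police College, Sichuan Police College) changes from edu (or company) to gov. Below is a minimal sketch of how such a bulk reclassification could be scripted, assuming the column order visible in the rows above (paper id, title, institution, institution detail, geocoded address, latitude, longitude, type, an unused field, country); the index constants, the "Police College" keyword rule, and the output filename are illustrative assumptions, not part of this commit:

import csv

# Column positions inferred from the rows above; names and indices are assumptions.
INSTITUTION_COL = 2   # short institution name
TYPE_COL = 7          # institution class: 'edu', 'gov', 'company', ...

def reclassify(in_path, out_path, keyword="Police College"):
    # Rewrite rows whose institution mentions `keyword` from 'edu' to 'gov'.
    with open(in_path, newline="", encoding="utf-8") as src, \
         open(out_path, "w", newline="", encoding="utf-8") as dst:
        writer = csv.writer(dst)
        for row in csv.reader(src):
            if len(row) > TYPE_COL and keyword in row[INSTITUTION_COL] \
                    and row[TYPE_COL] == "edu":
                row[TYPE_COL] = "gov"
            writer.writerow(row)

# Hypothetical invocation against the file touched in this commit:
reclassify("scraper/reports/doi_institutions_geocoded.csv",
           "doi_institutions_geocoded.fixed.csv")

Writing to a separate file and diffing it against the original keeps the correction reviewable one row at a time, which is consistent with the two single-line hunks shown here.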
diff --git a/scraper/reports/paper_title_report.html b/scraper/reports/paper_title_report.html
index 9a54a65b..9d1d2613 100644
--- a/scraper/reports/paper_title_report.html
+++ b/scraper/reports/paper_title_report.html
@@ -1 +1 @@
-<!doctype html><html><head><meta charset='utf-8'><title>Paper Title Sanity Check</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Title Sanity Check</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td></tr><tr><td>50_people_one_question</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose Estimates Across Space and Time</td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=collecting large, richly annotated facial-expression databases from movies&sort=relevance" target="_blank">[s2]</a></td><td>Australian National University</td><td>b1f4423c227fa37b9680787be38857069247a307</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a database for facial expression, valence, and arousal computing in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>d818568838433a6d6831adde49a58cef05e0c89f</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>appa_real</td><td>APPA-REAL</td><td>Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</td><td>Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=apparent and real age estimation in still images with deep residual regressors on appa-real database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>633c851ebf625ad7abdda2324e9de093cf623141</td></tr><tr><td>appa_real</td><td>APPA-REAL</td><td>From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</td><td>From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>The AR face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td></tr><tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr><tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr><tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr><tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr><tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr><tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr><tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human behavior analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>bpad</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute 
classification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>End-to-End People Detection in Crowded Scenes</td><td>End-to-End People Detection in Crowded Scenes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end people detection in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td></tr><tr><td>bu_3dfe</td><td>BU-3DFE</td><td>A 3D Facial Expression Database For Facial Behavior Research</td><td>A 3D facial expression database for facial behavior research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d facial expression database for facial behavior research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td></tr><tr><td>cacd</td><td></td><td>Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval</td><td>Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval</td><td><a href="https://pdfs.semanticscholar.org/c44c/84540db1c38ace232ef34b03bda1c81ba039.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=cross-age reference coding for age-invariant face recognition and retrieval&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c44c84540db1c38ace232ef34b03bda1c81ba039</td></tr><tr><td>cafe</td><td>#N/A</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: validity and reliability from untrained adults&sort=relevance" target="_blank">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr><tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for learning of object categories</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance" target="_blank">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr><tr><td>caltech_crp</td><td>Caltech CRP</td><td>Fine-grained classification of pedestrians in video: Benchmark and state of the art</td><td>Fine-grained classification of pedestrians in video: Benchmark and state of the art</td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained classification of pedestrians in video: benchmark and state of the art&sort=relevance" target="_blank">[s2]</a></td><td></td><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian detection: A benchmark</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a 
benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: an evaluation of the state of the art&sort=relevance" target="_blank">[s2]</a></td><td>California Institute of Technology</td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>casia_webface</td><td>CASIA Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance" target="_blank">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr><tr><td>celeba</td><td>CelebA</td><td>Deep Learning Face Attributes in the Wild</td><td>Deep Learning Face Attributes in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face attributes in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: A Review of Events and Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr><tr><td>chokepoint</td><td>ChokePoint</td><td>Patch-based Probabilistic Image Quality Assessment for Face Selection and Improved Video-based Face Recognition</td><td>Patch-based probabilistic image quality assessment for face selection and improved video-based face 
recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=patch-based probabilistic image quality assessment for face selection and improved video-based face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td></tr><tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image segmentation and labeling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr><tr><td>cmdp</td><td>CMDP</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=distance estimation of an unknown person from a portrait&sort=relevance" target="_blank">[s2]</a></td><td></td><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO: Common Objects in Context</td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td></tr><tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr><tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr><tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr><tr><td>cohn_kanade</td><td>CK</td><td>Comprehensive Database for Facial Expression 
Analysis</td><td>Comprehensive Database for Facial Expression Analysis</td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=comprehensive database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td></tr><tr><td>cohn_kanade_plus</td><td>CK+</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the extended cohn-kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression&sort=relevance" target="_blank">[s2]</a></td><td>University of Pittsburgh</td><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>Gaze locking: passive eye contact detection for human-object interaction</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>06f02199690961ba52997cde1527e714d2b3bf8f</td></tr><tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance" target="_blank">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr><tr><td>cuhk_train_station</td><td>CUHK Train Station Dataset</td><td>Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</td><td>Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding collective crowd behaviors: learning a mixture model of dynamic pedestrian-agents&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>5a4df9bef1872865f0b619ac3aacc97f49e4a035</td></tr><tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr><tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>Locally Aligned Feature Transforms across Views</td><td>Locally Aligned Feature Transforms across Views</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr><tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>ufi</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>data_61</td><td>Data61 Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td><span class="gray">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>Fashion Landmark Detection in the Wild</td><td>Fashion Landmark Detection in the Wild</td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fashion landmark detection in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>disfa</td><td>DISFA</td><td>1</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=1&sort=relevance" target="_blank">[s2]</a></td><td>University of Denver</td><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td></tr><tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: cross-distance and cross-spectral matching&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=improving person re-identification by attribute and identity learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</td><td>Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unlabeled samples generated by gan improve the person re-identification baseline in vitro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Tracking Social Groups Within and Across Cameras</td><td>Tracking Social Groups Within and Across Cameras</td><td><a href="https://users.cs.duke.edu/~tomasi/papers/ristani/ristaniTCAS16.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=tracking social groups within and across cameras&sort=relevance" target="_blank">[s2]</a></td><td>Duke University</td><td>9e644b1e33dd9367be167eb9d832174004840400</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Tracking Multiple People Online and in Real Time</td><td>Tracking Multiple People Online and in Real Time</td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=tracking multiple people online and in real time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td></tr><tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr><tr><td>erce</td><td>ERCe</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=video synopsis by heterogeneous multi-source correlation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6c293f0420f7e945b5916ae44269fb53e139275</td></tr><tr><td>erce</td><td>ERCe</td><td>Learning from Multiple Sources for Video Summarisation</td><td>Learning from Multiple Sources for Video Summarisation</td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning from multiple sources for video summarisation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td></tr><tr><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td>Depth and Appearance for Mobile Scene Analysis</td><td>Depth and Appearance for Mobile Scene Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=depth and appearance for mobile scene analysis&sort=relevance" target="_blank">[s2]</a></td><td>ETH Zurich</td><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td></tr><tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>72a155c987816ae81c858fddbd6beab656d86220</td></tr><tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>face_scrub</td><td>FaceScrub</td><td>A data-driven approach to cleaning large face datasets</td><td>A data-driven approach to cleaning large face datasets</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a data-driven approach to cleaning large face datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large collections of images with faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>Face Swapping: Automatically Replacing Faces in Photographs</td><td>Face swapping: automatically replacing faces in photographs</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face swapping: automatically replacing faces in photographs&sort=relevance" target="_blank">[s2]</a></td><td></td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>facebook_100</td><td>Facebook100</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer vision: a case study in unconstrained face recognition on facebook&sort=relevance" target="_blank">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr><tr><td>families_in_the_wild</td><td>FIW</td><td>Visual Kinship Recognition of Families in the Wild</td><td>Visual Kinship Recognition of Families in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=visual kinship recognition of families in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts Dartmouth</td><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>FDDB: A benchmark for face detection in unconstrained settings</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance" target="_blank">[s2]</a></td><td></td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>A new ranking method for 
principal components analysis and its application to face image analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr><tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr><tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep networks for facial expression recognition with crowd-sourced label distribution</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression recognition with crowd-sourced label distribution&sort=relevance" target="_blank">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr><tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr><tr><td>fiw_300</td><td>300-W</td><td>A semi-automatic methodology for facial landmark annotation</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a semi-automatic methodology for facial landmark annotation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge</td><td>300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>300 Faces In-The-Wild Challenge: database and results</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e4754afaa15b1b53e70743880484b8d0736990ff</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>Multimodal 2D, 2.5D & 3D Face Verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance" target="_blank">[s2]</a></td><td>Universidad Rey Juan Carlos, Spain</td><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" target="_blank">[s2]</a></td><td>NIST</td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing cosegmentation for recognizing people</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>FACE2GPS: Estimating geographic location from facial features</td><td>Exploring the geo-dependence of human face appearance</td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face2gps: estimating geographic location from facial features&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>Large-scale geo-facial image analysis</td><td>Large-scale geo-facial image analysis</td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=large-scale geo-facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4af89578ac237278be310f7660a408b03f12d603</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>Exploring the Geo-Dependence of Human Face Appearance</td><td>Exploring the geo-dependence of human face appearance</td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring the geo-dependence of human face appearance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>GeoFaceExplorer: Exploring the Geo-Dependence of Facial Attributes</td><td>GeoFaceExplorer: exploring the geo-dependence of facial attributes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=geofaceexplorer: exploring the geo-dependence of facial attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>17b46e2dad927836c689d6787ddb3387c6159ece</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>gfw</td><td>Grouping Face in the Wild</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merge or not? learning to group faces via imitation learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c91808994a250d7be332400a534a9291ca3b60e</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Object Recognition Using Segmentation for Feature Detection</td><td>Object recognition using segmentation for feature detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object recognition using segmentation for feature detection&sort=relevance" target="_blank">[s2]</a></td><td>Inst. of Comput. Sci., Univ. 
of Leoben, Austria</td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Generic object recognition with boosting</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance" target="_blank">[s2]</a></td><td>TU Graz</td><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td></tr><tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D human pose annotations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>The HDA+ Data Set for Research on Fully Automated Re-identification Systems</td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>helen</td><td>Helen</td><td>Interactive Facial Feature Localization</td><td>Interactive Facial Feature Localization</td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=interactive facial feature localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>95f12d27c3b4914e0668a268360948bce92f7db3</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td></tr><tr><td>hipsterwars</td><td>Hipsterwars</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hipster wars: discovering elements of fashion styles&sort=relevance" target="_blank">[s2]</a></td><td></td><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td></tr><tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware 
CNNs for Person Head Detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? a preliminary study of face-based recognition on a transgender dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance" target="_blank">[s2]</a></td><td>University of North Carolina at Wilmington</td><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td></tr><tr><td>ibm_dif</td><td>IBM Diversity in Faces</td><td>Diversity in Faces</td><td>Facial Coding Scheme Reference 1 Craniofacial Distances</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=diversity in faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database with age, pose and expression</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance" target="_blank">[s2]</a></td><td>Islamic Azad University</td><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td></tr><tr><td>iit_dehli_ear</td><td>IIT Delhi Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear Imaging</td><td><a
href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance" target="_blank">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr><tr><td>ijb_b</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr><tr><td>ijb_a</td><td>IJB-A</td><td>Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td>Imagery Library for Intelligent Detection Systems: The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td>Person Re-Identi cation by Video Ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identi cation by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>imdb_face</td><td>IMDb Face</td><td>The Devil of Face Recognition is in the Noise</td><td>The Devil of Face Recognition is in the Noise</td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the devil of face recognition is in the noise&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>Deep expectation of real and apparent age from a single image without facial 
landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr><tr><td>imfdb</td><td>IMFDB</td><td>Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations</td><td>Indian Movie Face Database: A benchmark for face recognition under wide variations</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=indian movie face database: a benchmark for face recognition under wide variations&sort=relevance" target="_blank">[s2]</a></td><td>BVBCET, Hubli, India</td><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td></tr><tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=multi-task recurrent neural network for immediacy prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr><tr><td>imsitu</td><td>imSitu</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=situation recognition: visual semantic role labeling for image understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>51eba481dac6b229a7490f650dff7b17ce05df73</td></tr><tr><td>inria_person</td><td>INRIA Pedestrian</td><td>Histograms of Oriented Gradients for Human Detection</td><td>Histograms of oriented gradients for human detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=histograms of oriented gradients for human detection&sort=relevance" target="_blank">[s2]</a></td><td>INRIA Rhone-Alps, Montbonnot, France</td><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td></tr><tr><td>jaffe</td><td>JAFFE</td><td>Coding Facial Expressions with Gabor Wavelets</td><td>Coding Facial Expressions with Gabor Wavelets</td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=coding facial expressions with gabor wavelets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>The jiku mobile video dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance"
target="_blank">[s2]</a></td><td>National University of Singapore</td><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td></tr><tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Genealogical face recognition based on UB KinFace database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Buffalo</td><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Kinship Verification through Transfer Learning</td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td></tr><tr><td>kinectface</td><td>KinectFaceDB</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=kinectfacedb: a kinect database for face recognition&sort=relevance" target="_blank">[s2]</a></td><td>University of North Carolina at Chapel Hill</td><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>Vision meets robotics: The KITTI dataset</td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td></tr><tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr><tr><td>laofiw</td><td>LAOFIW</td><td>Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</td><td>Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</td><td><a
href="https://arxiv.org/pdf/1809.02169.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=turning a blind eye: explicit removal of biases and variation from deep neural network embeddings&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4eab317b5ac436a949849ed286baa3de2a541eef</td></tr><tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person search&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr><tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td>Learning Effective Human Pose Estimation from Inaccurate Annotation</td><td>Learning effective human pose estimation from inaccurate annotation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning effective human pose estimation from inaccurate annotation&sort=relevance" target="_blank">[s2]</a></td><td>University of Leeds</td><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td></tr><tr><td>lfpw</td><td>LFWP</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=localizing parts of faces using a consensus of exemplars&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140438a77a771a8fb656b39a78ff488066eb6b50</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild: A Database forStudying Face Recognition in Unconstrained Environments</td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled 
Faces in the Wild: A Survey</td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by combining multiple descriptors and learned background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance" target="_blank">[s2]</a></td><td>Laboratoire de Télécommunications et Télédétection, UCL, Louvain-La-Neuve, Belgium</td><td>8be57cdad86fdf8c8290df4ca3149592f3c46dd3</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>XM2VTSDB : The extended M2VTS database</td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td></tr><tr><td>mafa</td><td>MAsked FAces</td><td>Detecting Masked Faces in the Wild with LLE-CNNs</td><td>Detecting Masked Faces in the Wild with LLE-CNNs</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=detecting masked faces in the wild with lle-cnns&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr><tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance"
target="_blank">[s2]</a></td><td></td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=improving person re-identification by attribute and identity learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A Benchmark</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Robust Semi-automatic Head Pose Labeling for Real-World Face Video Sequences</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=robust semi-automatic head pose labeling for real-world face video sequences&sort=relevance"
target="_blank">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>megaage</td><td>MegaAge</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=quantifying facial age by posterior of age comparisons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td></tr><tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for Million Scale Face Recognition</td><td>Level Playing Field for Million Scale Face Recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr><tr><td>megaface</td><td>MegaFace</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the megaface benchmark: 1 million faces for recognition at scale&sort=relevance" target="_blank">[s2]</a></td><td></td><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td></tr><tr><td>mifs</td><td>MIFS</td><td>Spoofing Faces Using Makeup: An Investigative Study</td><td>Spoofing faces using makeup: An investigative study</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=spoofing faces using makeup: an investigative study&sort=relevance" target="_blank">[s2]</a></td><td>INRIA Méditerranée</td><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td></tr><tr><td>mit_cbcl</td><td>MIT CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance" target="_blank">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr><tr><td>miw</td><td>MIW</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr><tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time
dataset: one million videos for event understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td></tr><tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2258e01865367018ed6f4262c880df85b94959f8</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>mot</td><td>MOT</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=learning to associate: hybridboosted multi-target tracker for crowded scene&sort=relevance" target="_blank">[s2]</a></td><td>University of Southern California</td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression
Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpii_gaze</td><td>MPIIGaze</td><td>Appearance-based Gaze Estimation in the Wild</td><td>Appearance-based gaze estimation in the wild</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=appearance-based gaze estimation in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td></tr><tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of the art analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world person re-identification using a drone&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr><tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark for large-scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-identification</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a
href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mug_faces</td><td>MUG Faces</td><td>The MUG Facial Expression Database</td><td>The MUG facial expression database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the mug facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>Aristotle University of Thessaloniki</td><td>f1af714b92372c8e606485a3982eab2f16772ad8</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>names_and_faces</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nd_2006</td><td>ND-2006</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=using a multi-instance enrollment representation to improve 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td>University of Notre Dame</td><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" target="_blank">[s2]</a></td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Crowdsourcing facial expressions for affective-interaction</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><span
class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>pa_100k</td><td>PA-100K</td><td>HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</td><td>HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hydraplus-net: attentive deep features for pedestrian analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td></tr><tr><td>penn_fudan</td><td>Penn Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining Recognition and Segmentation</td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far Distance</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>prw</td><td>PRW</td><td>Person Re-identification in the Wild</td><td>Person Re-identification in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0b84f07af44f964817675ad961def8a51406dd2e</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>pubfig</td><td>PubFig</td><td>Attribute and Simile Classifiers for Face Verification</td><td>Attribute and simile classifiers for face verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=attribute and simile classifiers for face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td></tr><tr><td>pubfig_83</td><td>pubfig83</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer vision: a case study in unconstrained face recognition on facebook&sort=relevance" target="_blank">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>The put 
face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Time-delayed correlation analysis for multi-camera activity understanding</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=time-delayed correlation analysis for multi-camera activity understanding&sort=relevance" target="_blank">[s2]</a></td><td>Queen Mary University of London</td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Multi-camera activity correlation analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance" target="_blank">[s2]</a></td><td>Queen Mary University of London</td><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent Re-identification in a Camera Network</td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person 
Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d body model for people tracking and re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>SCface – surveillance cameras face database</td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td></tr><tr><td>scut_fbp</td><td>SCUT-FBP</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scut-fbp: a benchmark dataset for facial beauty perception&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and cascaded multi-scale architecture&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Local Descriptors Encoded by Fisher Vectors for Person Re-identification</td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video 
ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>Learning Social Relation Traits from Face Images</td><td>Learning Social Relation Traits from Face Images</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait Database</td><td>On a Large Sequence-Based Human Gait Database</td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>sports_videos_in_the_wild</td><td>SVW</td><td>Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis</td><td>Sports Videos in the Wild (SVW): A video dataset for sports analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sports videos in the wild (svw): a video dataset for sports analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Social LSTM: Human Trajectory Prediction in Crowded Spaces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database: Discovering, Annotating, and Recognizing Scene Attributes</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sun attribute database: discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td>Brown University</td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Texas 3D Face Recognition Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>tisi</td><td>Times Square Intersection</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=video synopsis by heterogeneous multi-source correlation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6c293f0420f7e945b5916ae44269fb53e139275</td></tr><tr><td>tisi</td><td>Times Square Intersection</td><td>Learning from Multiple Sources for Video Summarisation</td><td>Learning from Multiple Sources for Video Summarisation</td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning from multiple sources for video summarisation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td></tr><tr><td>oxford_town_centre</td><td>TownCentre</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time surveillance video&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and 
people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_motionpairs</td><td>TUD-Motionpairs</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_multiview</td><td>TUD-Multiview</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td>TU Darmstadt</td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td>TU Darmstadt</td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>uccs</td><td>UCCS</td><td>Large scale unconstrained open set face database</td><td>Large scale unconstrained open set face database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=large scale unconstrained open set face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td></tr><tr><td>uccs</td><td>UCCS</td><td>Unconstrained Face Detection and Open-Set Face Recognition Challenge</td><td>Unconstrained Face Detection and Open-Set Face Recognition Challenge</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained face detection and open-set face recognition challenge&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td></tr><tr><td>ucf_101</td><td>UCF101</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ucf101: a dataset of 101 human actions classes from videos in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>ufdd</td><td>UFDD</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the limits of unconstrained face detection: a challenge dataset and baseline results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3531332efe19be21e7401ba1f04570a142617236</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face dataset for training deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td>PAINFUL DATA: The UNBC-McMaster Shoulder Pain Expression Archive Database</td><td>Painful data: The UNBC-McMaster shoulder pain expression archive database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=painful data: the unbc-mcmaster shoulder pain 
expression archive database&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td></tr><tr><td>urban_tribes</td><td>Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from bikers to surfers: visual recognition of urban tribes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>774cbb45968607a027ae4729077734db000a1ec5</td></tr><tr><td>used</td><td>USED Social Event Dataset</td><td>USED: A Large-scale Social Event Detection Dataset</td><td>USED: a large-scale social event detection dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=used: a large-scale social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td>University of Trento</td><td>8627f019882b024aef92e4eb9355c499c733e5b7</td></tr><tr><td>v47</td><td>V47</td><td>Re-identification of Pedestrians with Variable Occlusion and Scale</td><td>Re-identification of pedestrians with variable occlusion and scale</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=re-identification of pedestrians with variable occlusion and scale&sort=relevance" target="_blank">[s2]</a></td><td>Kingston University</td><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td></tr><tr><td>vadana</td><td>VADANA</td><td>VADANA: A dense dataset for facial image analysis</td><td>VADANA: A dense dataset for facial image analysis</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=vadana: a dense dataset for facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td>University of Delaware</td><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td></tr><tr><td>vgg_celebs_in_places</td><td>CIP</td><td>Faces in Places: Compound Query Retrieval</td><td>Faces in Places: compound query retrieval</td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=faces in places: compound query retrieval&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7ebb153704706e457ab57b432793d2b6e5d12592</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>vgg_faces2</td><td>VGG Face2</td><td>VGGFace2: A dataset for recognising faces across pose and age</td><td>VGGFace2: A Dataset for Recognising Faces across Pose and Age</td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vggface2: a dataset for recognising faces across pose and age&sort=relevance" target="_blank">[s2]</a></td><td></td><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td></tr><tr><td>violent_flows</td><td>Violent Flows</td><td>Violent Flows: Real-Time Detection of Violent Crowd Behavior</td><td>Violent flows: Real-time detection of violent crowd 
behavior</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=violent flows: real-time detection of violent crowd behavior&sort=relevance" target="_blank">[s2]</a></td><td>Open University of Israel</td><td>5194cbd51f9769ab25260446b4fa17204752e799</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>visual_phrases</td><td>Phrasal Recognition</td><td>Recognition using Visual Phrases</td><td>Recognition using visual phrases</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=recognition using visual phrases&sort=relevance" target="_blank">[s2]</a></td><td>University of Illinois, Urbana-Champaign</td><td>e8de844fefd54541b71c9823416daa238be65546</td></tr><tr><td>vmu</td><td>VMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes (VOC) Challenge</td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>ward</td><td>WARD</td><td>Re-identify people in wide area camera network</td><td>Re-identify people in wide area camera network</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=re-identify people in wide area camera network&sort=relevance" target="_blank">[s2]</a></td><td>University of Udine</td><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? 
approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td>University of Kentucky</td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wider</td><td>WIDER</td><td>Recognize Complex Events from Static Images by Fusing Deep Channels</td><td>Recognize complex events from static images by fusing deep channels</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=recognize complex events from static images by fusing deep channels&sort=relevance" target="_blank">[s2]</a></td><td></td><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td></tr><tr><td>wider_attribute</td><td>WIDER Attribute</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human attribute recognition by deep hierarchical contexts&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44d23df380af207f5ac5b41459c722c87283e1eb</td></tr><tr><td>wider_face</td><td>WIDER FACE</td><td>WIDER FACE: A Face Detection Benchmark</td><td>WIDER FACE: A Face Detection Benchmark</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=wider face: a face detection benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td></tr><tr><td>wlfdb</td><td>WLFDB</td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB : Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>Acquiring linear subspaces for face recognition under variable lighting</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable 
lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>YFCC100M: the new data in multimedia research</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance" target="_blank">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>youtube_celebrities</td><td>YouTube Celebrities</td><td>Face Tracking and Recognition with Visual Constraints in Real-World Videos</td><td>Face tracking and recognition with visual constraints in real-world videos</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=face tracking and recognition with visual constraints in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td>Rutgers University</td><td>6204776d31359d129a582057c2d788a14f8aadeb</td></tr><tr><td>youtube_faces</td><td>YouTubeFaces</td><td>Face Recognition in Unconstrained Videos with Matched Background Similarity</td><td>Face recognition in unconstrained videos with matched background similarity</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=face recognition in unconstrained videos with matched background similarity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><span class="gray">[pdf]</span></td><td><a 
href="https://www.semanticscholar.org/search?q=personalizing human video pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr></table></body></html> \ No newline at end of file
+<!doctype html><html><head><meta charset='utf-8'><title>Paper Title Sanity Check</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Title Sanity Check</h2><table border='1' cellpadding='3' cellspacing='3'><tr><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th></tr><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td></tr><tr><td>fpoq</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose Estimates Across Space and Time</td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=collecting large, richly annotated facial-expression databases from movies&sort=relevance" target="_blank">[s2]</a></td><td>Australian National University</td><td>b1f4423c227fa37b9680787be38857069247a307</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a database for facial expression, valence, and arousal computing in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>d818568838433a6d6831adde49a58cef05e0c89f</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>appa_real</td><td>APPA-REAL</td><td>Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</td><td>Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=apparent and real age estimation in still images with deep residual regressors on appa-real database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>633c851ebf625ad7abdda2324e9de093cf623141</td></tr><tr><td>appa_real</td><td>APPA-REAL</td><td>From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</td><td>From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>The AR face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td></tr><tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr><tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr><tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr><tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr><tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr><tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr><tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human behavior analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>bpad</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute 
classification</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>End-to-End People Detection in Crowded Scenes</td><td>End-to-End People Detection in Crowded Scenes</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end people detection in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td></tr><tr><td>bu_3dfe</td><td>BU-3DFE</td><td>A 3D Facial Expression Database For Facial Behavior Research</td><td>A 3D facial expression database for facial behavior research</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=a 3d facial expression database for facial behavior research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td></tr><tr><td>cacd</td><td></td><td>Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval</td><td>Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval</td><td><a href="https://pdfs.semanticscholar.org/c44c/84540db1c38ace232ef34b03bda1c81ba039.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=cross-age reference coding for age-invariant face recognition and retrieval&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c44c84540db1c38ace232ef34b03bda1c81ba039</td></tr><tr><td>cafe</td><td>#N/A</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: validity and reliability from untrained adults&sort=relevance" target="_blank">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr><tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for learning of object categories</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance" target="_blank">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr><tr><td>caltech_crp</td><td>Caltech CRP</td><td>Fine-grained classification of pedestrians in video: Benchmark and state of the art</td><td>Fine-grained classification of pedestrians in video: Benchmark and state of the art</td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained classification of pedestrians in video: benchmark and state of the art&sort=relevance" target="_blank">[s2]</a></td><td></td><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian detection: A benchmark</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a 
benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: an evaluation of the state of the art&sort=relevance" target="_blank">[s2]</a></td><td>California Institute of Technology</td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>casia_webface</td><td>CASIA Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance" target="_blank">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr><tr><td>celeba</td><td>CelebA</td><td>Deep Learning Face Attributes in the Wild</td><td>Deep Learning Face Attributes in the Wild</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face attributes in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: A Review of Events and Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr><tr><td>chokepoint</td><td>ChokePoint</td><td>Patch-based Probabilistic Image Quality Assessment for Face Selection and Improved Video-based Face Recognition</td><td>Patch-based probabilistic image quality assessment for face selection and improved video-based face 
recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=patch-based probabilistic image quality assessment for face selection and improved video-based face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td></tr><tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image segmentation and labeling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr><tr><td>cmdp</td><td>CMDP</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=distance estimation of an unknown person from a portrait&sort=relevance" target="_blank">[s2]</a></td><td></td><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO: Common Objects in Context</td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td></tr><tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr><tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr><tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr><tr><td>cohn_kanade</td><td>CK</td><td>Comprehensive Database for Facial Expression 
Analysis</td><td>Comprehensive Database for Facial Expression Analysis</td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=comprehensive database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td></tr><tr><td>cohn_kanade_plus</td><td>CK+</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the extended cohn-kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression&sort=relevance" target="_blank">[s2]</a></td><td>University of Pittsburgh</td><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>Gaze locking: passive eye contact detection for human-object interaction</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>06f02199690961ba52997cde1527e714d2b3bf8f</td></tr><tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance" target="_blank">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr><tr><td>cuhk_train_station</td><td>CUHK Train Station Dataset</td><td>Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</td><td>Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=understanding collective crowd behaviors: learning a mixture model of dynamic pedestrian-agents&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>5a4df9bef1872865f0b619ac3aacc97f49e4a035</td></tr><tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr><tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>Locally Aligned Feature Transforms across Views</td><td>Locally Aligned Feature Transforms across Views</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr><tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>ufi</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>data_61</td><td>Data61 Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td><span class="gray">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>Fashion Landmark Detection in the Wild</td><td>Fashion Landmark Detection in the Wild</td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fashion landmark detection in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance" target="_blank">[s2]</a></td><td>University of Denver</td><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td></tr><tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: cross-distance and cross-spectral matching&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=improving person re-identification by attribute and identity learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</td><td>Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unlabeled samples generated by gan improve the person re-identification baseline in vitro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Tracking Social Groups Within and Across Cameras</td><td>Tracking Social Groups Within and Across Cameras</td><td><a href="https://users.cs.duke.edu/~tomasi/papers/ristani/ristaniTCAS16.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=tracking social groups within and across cameras&sort=relevance" target="_blank">[s2]</a></td><td>Duke University</td><td>9e644b1e33dd9367be167eb9d832174004840400</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Tracking Multiple People Online and in Real Time</td><td>Tracking Multiple People Online and in Real Time</td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=tracking multiple people online and in real time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td></tr><tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr><tr><td>erce</td><td>ERCe</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=video synopsis by heterogeneous multi-source correlation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6c293f0420f7e945b5916ae44269fb53e139275</td></tr><tr><td>erce</td><td>ERCe</td><td>Learning from Multiple Sources for Video Summarisation</td><td>Learning from Multiple Sources for Video Summarisation</td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning from multiple sources for video summarisation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td></tr><tr><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td>Depth and Appearance for Mobile Scene Analysis</td><td>Depth and Appearance for Mobile Scene Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=depth and appearance for mobile scene analysis&sort=relevance" target="_blank">[s2]</a></td><td>ETH Zurich</td><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td></tr><tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>72a155c987816ae81c858fddbd6beab656d86220</td></tr><tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>face_scrub</td><td>FaceScrub</td><td>A data-driven approach to cleaning large face datasets</td><td>A data-driven approach to cleaning large face datasets</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a data-driven approach to cleaning large face datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large collections of images with faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>Face Swapping: Automatically Replacing Faces in Photographs</td><td>Face swapping: automatically replacing faces in photographs</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face swapping: automatically replacing faces in photographs&sort=relevance" target="_blank">[s2]</a></td><td></td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>facebook_100</td><td>Facebook100</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer vision: a case study in unconstrained face recognition on facebook&sort=relevance" target="_blank">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr><tr><td>families_in_the_wild</td><td>FIW</td><td>Visual Kinship Recognition of Families in the Wild</td><td>Visual Kinship Recognition of Families in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=visual kinship recognition of families in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts Dartmouth</td><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>FDDB: A benchmark for face detection in unconstrained settings</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance" target="_blank">[s2]</a></td><td></td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>A new ranking method for 
principal components analysis and its application to face image analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr><tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr><tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep networks for facial expression recognition with crowd-sourced label distribution</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression recognition with crowd-sourced label distribution&sort=relevance" target="_blank">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr><tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr><tr><td>fiw_300</td><td>300-W</td><td>A semi-automatic methodology for facial landmark annotation</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a semi-automatic methodology for facial landmark annotation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge</td><td>300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>300 Faces In-The-Wild Challenge: database and results</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e4754afaa15b1b53e70743880484b8d0736990ff</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>Multimodal 2D, 2.5D & 3D Face Verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance" target="_blank">[s2]</a></td><td>Universidad Rey Juan Carlos, Spain</td><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" target="_blank">[s2]</a></td><td>NIST</td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing cosegmentation for recognizing people</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>FACE2GPS: Estimating geographic location from facial features</td><td>Exploring the geo-dependence of human face appearance</td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face2gps: estimating geographic location from facial features&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>Large-scale geo-facial image analysis</td><td>Large-scale geo-facial image analysis</td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=large-scale geo-facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4af89578ac237278be310f7660a408b03f12d603</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>Exploring the Geo-Dependence of Human Face Appearance</td><td>Exploring the geo-dependence of human face appearance</td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring the geo-dependence of human face appearance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>GeoFaceExplorer: Exploring the Geo-Dependence of Facial Attributes</td><td>GeoFaceExplorer: exploring the geo-dependence of facial attributes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=geofaceexplorer: exploring the geo-dependence of facial attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>17b46e2dad927836c689d6787ddb3387c6159ece</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>gfw</td><td>Grouping Face in the Wild</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merge or not? learning to group faces via imitation learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c91808994a250d7be332400a534a9291ca3b60e</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Object Recognition Using Segmentation for Feature Detection</td><td>Object recognition using segmentation for feature detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object recognition using segmentation for feature detection&sort=relevance" target="_blank">[s2]</a></td><td>Inst. of Comput. Sci., Univ. 
of Leoben, Austria</td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Generic object recognition with boosting</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance" target="_blank">[s2]</a></td><td>TU Graz</td><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td></tr><tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D human pose annotations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>The HDA+ Data Set for Research on Fully Automated Re-identification Systems</td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>helen</td><td>Helen</td><td>Interactive Facial Feature Localization</td><td>Interactive Facial Feature Localization</td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=interactive facial feature localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>95f12d27c3b4914e0668a268360948bce92f7db3</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td></tr><tr><td>hipsterwars</td><td>Hipsterwars</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hipster wars: discovering elements of fashion styles&sort=relevance" target="_blank">[s2]</a></td><td></td><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td></tr><tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware 
CNNs for Person Head Detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? a preliminary study of face-based recognition on a transgender dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance" target="_blank">[s2]</a></td><td>University of North Carolina at Wilmington</td><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td></tr><tr><td>ibm_dif</td><td>IBM Diversity in Faces</td><td>Diversity in Faces</td><td>Facial Coding Scheme Reference 1 Craniofacial Distances</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=diversity in faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database with age, pose and expression</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance" target="_blank">[s2]</a></td><td>Islamic Azad University</td><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td></tr><tr><td>iit_dehli_ear</td><td>IIT Delhi Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear Imaging</td><td><a 
href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance" target="_blank">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr><tr><td>ijb_b</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr><tr><td>ijb_a</td><td>IJB-A</td><td>Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=pushing the frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td>Imagery Library for Intelligent Detection Systems: The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td>Person Re-Identification by Video Ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>imdb_face</td><td>IMDb Face</td><td>The Devil of Face Recognition is in the Noise</td><td>The Devil of Face Recognition is in the Noise</td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the devil of face recognition is in the noise&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>Deep expectation of real and apparent age from a single image without facial 
landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr><tr><td>imfdb</td><td>IMFDB</td><td>Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations</td><td>Indian Movie Face Database: A benchmark for face recognition under wide variations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian movie face database: a benchmark for face recognition under wide variations&sort=relevance" target="_blank">[s2]</a></td><td>BVBCET, Hubli, India</td><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td></tr><tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-task recurrent neural network for immediacy prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr><tr><td>imsitu</td><td>imSitu</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=situation recognition: visual semantic role labeling for image understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>51eba481dac6b229a7490f650dff7b17ce05df73</td></tr><tr><td>inria_person</td><td>INRIA Pedestrian</td><td>Histograms of Oriented Gradients for Human Detection</td><td>Histograms of oriented gradients for human detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=histograms of oriented gradients for human detection&sort=relevance" target="_blank">[s2]</a></td><td>INRIA Rhone-Alps, Montbonnot, France</td><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td></tr><tr><td>jaffe</td><td>JAFFE</td><td>Coding Facial Expressions with Gabor Wavelets</td><td>Coding Facial Expressions with Gabor Wavelets</td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=coding facial expressions with gabor wavelets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>The jiku mobile video dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance" 
target="_blank">[s2]</a></td><td>National University of Singapore</td><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td></tr><tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Genealogical face recognition based on UB KinFace database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Buffalo</td><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Kinship Verification through Transfer Learning</td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td></tr><tr><td>kinectface</td><td>KinectFaceDB</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinectfacedb: a kinect database for face recognition&sort=relevance" target="_blank">[s2]</a></td><td>University of North Carolina at Chapel Hill</td><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>Vision meets robotics: The KITTI dataset</td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td></tr><tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr><tr><td>laofiw</td><td>LAOFIW</td><td>Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</td><td>Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</td><td><a 
href="https://arxiv.org/pdf/1809.02169.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=turning a blind eye: explicit removal of biases and variation from deep neural network embeddings&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4eab317b5ac436a949849ed286baa3de2a541eef</td></tr><tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person search&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr><tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td>Learning Effective Human Pose Estimation from Inaccurate Annotation</td><td>Learning effective human pose estimation from inaccurate annotation</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=learning effective human pose estimation from inaccurate annotation&sort=relevance" target="_blank">[s2]</a></td><td>University of Leeds</td><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td></tr><tr><td>lfpw</td><td>LFPW</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=localizing parts of faces using a consensus of exemplars&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140438a77a771a8fb656b39a78ff488066eb6b50</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled 
Faces in the Wild: A Survey</td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by combining multiple descriptors and learned background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance" target="_blank">[s2]</a></td><td>Laboratoire de Télécommunications et Télédétection, UCL, Louvain-La-Neuve, Belgium</td><td>8be57cdad86fdf8c8290df4ca3149592f3c46dd3</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>XM2VTSDB : The extended M2VTS database</td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td></tr><tr><td>mafa</td><td>MAsked FAces</td><td>Detecting Masked Faces in the Wild with LLE-CNNs</td><td>Detecting Masked Faces in the Wild with LLE-CNNs</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting masked faces in the wild with lle-cnns&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr><tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=improving person re-identification by attribute and identity learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A Benchmark</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Robust Semi-automatic Head Pose Labeling for Real-World Face Video Sequences</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust semi-automatic head pose labeling for real-world face video sequences&sort=relevance" 
target="_blank">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>megaage</td><td>MegaAge</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=quantifying facial age by posterior of age comparisons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td></tr><tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for Million Scale Face Recognition</td><td>Level Playing Field for Million Scale Face Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr><tr><td>megaface</td><td>MegaFace</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the megaface benchmark: 1 million faces for recognition at scale&sort=relevance" target="_blank">[s2]</a></td><td></td><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td></tr><tr><td>mifs</td><td>MIFS</td><td>Spoofing Faces Using Makeup: An Investigative Study</td><td>Spoofing faces using makeup: An investigative study</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=spoofing faces using makeup: an investigative study&sort=relevance" target="_blank">[s2]</a></td><td>INRIA Méditerranée</td><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td></tr><tr><td>mit_cbcl</td><td>MIT CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance" target="_blank">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr><tr><td>miw</td><td>MIW</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr><tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time 
dataset: one million videos for event understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td></tr><tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2258e01865367018ed6f4262c880df85b94959f8</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>mot</td><td>MOT</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to associate: hybridboosted multi-target tracker for crowded scene&sort=relevance" target="_blank">[s2]</a></td><td>University of Southern California</td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression 
Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpii_gaze</td><td>MPIIGaze</td><td>Appearance-based Gaze Estimation in the Wild</td><td>Appearance-based gaze estimation in the wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=appearance-based gaze estimation in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td></tr><tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of the art analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world person re-identification using a drone&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr><tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark for large-scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-identification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a 
href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mug_faces</td><td>MUG Faces</td><td>The MUG Facial Expression Database</td><td>The MUG facial expression database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mug facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>Aristotle University of Thessaloniki</td><td>f1af714b92372c8e606485a3982eab2f16772ad8</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>names_and_faces</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nd_2006</td><td>ND-2006</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=using a multi-instance enrollment representation to improve 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td>University of Notre Dame</td><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" target="_blank">[s2]</a></td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Crowdsourcing facial expressions for affective-interaction</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><span 
class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>pa_100k</td><td>PA-100K</td><td>HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</td><td>HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hydraplus-net: attentive deep features for pedestrian analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td></tr><tr><td>penn_fudan</td><td>Penn Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining Recognition and Segmentation</td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far Distance</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>prw</td><td>PRW</td><td>Person Re-identification in the Wild</td><td>Person Re-identification in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0b84f07af44f964817675ad961def8a51406dd2e</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>pubfig</td><td>PubFig</td><td>Attribute and Simile Classifiers for Face Verification</td><td>Attribute and simile classifiers for face verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=attribute and simile classifiers for face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td></tr><tr><td>pubfig_83</td><td>pubfig83</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer vision: a case study in unconstrained face recognition on facebook&sort=relevance" target="_blank">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>The put 
face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Time-delayed correlation analysis for multi-camera activity understanding</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=time-delayed correlation analysis for multi-camera activity understanding&sort=relevance" target="_blank">[s2]</a></td><td>Queen Mary University of London</td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Multi-camera activity correlation analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance" target="_blank">[s2]</a></td><td>Queen Mary University of London</td><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent Re-identification in a Camera Network</td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person 
Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d body model for people tracking and re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>SCface – surveillance cameras face database</td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td></tr><tr><td>scut_fbp</td><td>SCUT-FBP</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scut-fbp: a benchmark dataset for facial beauty perception&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and cascaded multi-scale architecture&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Local Descriptors Encoded by Fisher Vectors for Person Re-identification</td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video 
ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>Learning Social Relation Traits from Face Images</td><td>Learning Social Relation Traits from Face Images</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait Database</td><td>On a Large Sequence-Based Human Gait Database</td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>sports_videos_in_the_wild</td><td>SVW</td><td>Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis</td><td>Sports Videos in the Wild (SVW): A video dataset for sports analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sports videos in the wild (svw): a video dataset for sports analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Social LSTM: Human Trajectory Prediction in Crowded Spaces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database: Discovering, Annotating, and Recognizing Scene Attributes</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sun attribute database: discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td>Brown University</td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Texas 3D Face Recognition Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>tisi</td><td>Times Square Intersection</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=video synopsis by heterogeneous multi-source correlation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6c293f0420f7e945b5916ae44269fb53e139275</td></tr><tr><td>tisi</td><td>Times Square Intersection</td><td>Learning from Multiple Sources for Video Summarisation</td><td>Learning from Multiple Sources for Video Summarisation</td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning from multiple sources for video summarisation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td></tr><tr><td>oxford_town_centre</td><td>TownCentre</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time surveillance video&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and 
people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_motionpairs</td><td>TUD-Motionparis</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_multiview</td><td>TUD-Multiview</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td>TU Darmstadt</td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td>TU Darmstadt</td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>uccs</td><td>UCCS</td><td>Large scale unconstrained open set face database</td><td>Large scale unconstrained open set face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large scale unconstrained open set face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td></tr><tr><td>uccs</td><td>UCCS</td><td>Unconstrained Face Detection and Open-Set Face Recognition Challenge</td><td>Unconstrained Face Detection and Open-Set Face Recognition Challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained face detection and open-set face recognition challenge&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td></tr><tr><td>ucf_101</td><td>UCF101</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ucf101: a dataset of 101 human actions classes from videos in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>ufdd</td><td>UFDD</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the limits of unconstrained face detection: a challenge dataset and baseline results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3531332efe19be21e7401ba1f04570a142617236</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face dataset for training deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td>PAINFUL DATA: The UNBC-McMaster Shoulder Pain Expression Archive Database</td><td>Painful data: The UNBC-McMaster shoulder pain expression archive database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=painful data: the unbc-mcmaster shoulder pain 
expression archive database&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td></tr><tr><td>urban_tribes</td><td>Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from bikers to surfers: visual recognition of urban tribes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>774cbb45968607a027ae4729077734db000a1ec5</td></tr><tr><td>used</td><td>USED Social Event Dataset</td><td>USED: A Large-scale Social Event Detection Dataset</td><td>USED: a large-scale social event detection dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=used: a large-scale social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td>University of Trento</td><td>8627f019882b024aef92e4eb9355c499c733e5b7</td></tr><tr><td>v47</td><td>V47</td><td>Re-identification of Pedestrians with Variable Occlusion and Scale</td><td>Re-identification of pedestrians with variable occlusion and scale</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identification of pedestrians with variable occlusion and scale&sort=relevance" target="_blank">[s2]</a></td><td>Kingston University</td><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td></tr><tr><td>vadana</td><td>VADANA</td><td>VADANA: A dense dataset for facial image analysis</td><td>VADANA: A dense dataset for facial image analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vadana: a dense dataset for facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td>University of Delaware</td><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td></tr><tr><td>vgg_celebs_in_places</td><td>CIP</td><td>Faces in Places: Compound Query Retrieval</td><td>Faces in Places: compound query retrieval</td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=faces in places: compound query retrieval&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7ebb153704706e457ab57b432793d2b6e5d12592</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>vgg_faces2</td><td>VGG Face2</td><td>VGGFace2: A dataset for recognising faces across pose and age</td><td>VGGFace2: A Dataset for Recognising Faces across Pose and Age</td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vggface2: a dataset for recognising faces across pose and age&sort=relevance" target="_blank">[s2]</a></td><td></td><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td></tr><tr><td>violent_flows</td><td>Violent Flows</td><td>Violent Flows: Real-Time Detection of Violent Crowd Behavior</td><td>Violent flows: Real-time detection of violent crowd 
behavior</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=violent flows: real-time detection of violent crowd behavior&sort=relevance" target="_blank">[s2]</a></td><td>Open University of Israel</td><td>5194cbd51f9769ab25260446b4fa17204752e799</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>visual_phrases</td><td>Phrasal Recognition</td><td>Recognition using Visual Phrases</td><td>Recognition using visual phrases</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition using visual phrases&sort=relevance" target="_blank">[s2]</a></td><td>University of Illinois, Urbana-Champaign</td><td>e8de844fefd54541b71c9823416daa238be65546</td></tr><tr><td>vmu</td><td>VMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes (VOC) Challenge</td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>ward</td><td>WARD</td><td>Re-identify people in wide area camera network</td><td>Re-identify people in wide area camera network</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identify people in wide area camera network&sort=relevance" target="_blank">[s2]</a></td><td>University of Udine</td><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? 
approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td>University of Kentucky</td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wider</td><td>WIDER</td><td>Recognize Complex Events from Static Images by Fusing Deep Channels</td><td>Recognize complex events from static images by fusing deep channels</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognize complex events from static images by fusing deep channels&sort=relevance" target="_blank">[s2]</a></td><td></td><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td></tr><tr><td>wider_attribute</td><td>WIDER Attribute</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human attribute recognition by deep hierarchical contexts&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44d23df380af207f5ac5b41459c722c87283e1eb</td></tr><tr><td>wider_face</td><td>WIDER FACE</td><td>WIDER FACE: A Face Detection Benchmark</td><td>WIDER FACE: A Face Detection Benchmark</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wider face: a face detection benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td></tr><tr><td>wlfdb</td><td>WLFDB</td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB : Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>Acquiring linear subspaces for face recognition under variable lighting</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable 
lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>YFCC100M: the new data in multimedia research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance" target="_blank">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>youtube_celebrities</td><td>YouTube Celebrities</td><td>Face Tracking and Recognition with Visual Constraints in Real-World Videos</td><td>Face tracking and recognition with visual constraints in real-world videos</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face tracking and recognition with visual constraints in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td>Rutgers University</td><td>6204776d31359d129a582057c2d788a14f8aadeb</td></tr><tr><td>youtube_faces</td><td>YouTubeFaces</td><td>Face Recognition in Unconstrained Videos with Matched Background Similarity</td><td>Face recognition in unconstrained videos with matched background similarity</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition in unconstrained videos with matched background similarity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><span class="gray">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=personalizing human video pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/reports/paper_title_report_no_location.html b/scraper/reports/paper_title_report_no_location.html
index 49c9eec8..534df2de 100644
--- a/scraper/reports/paper_title_report_no_location.html
+++ b/scraper/reports/paper_title_report_no_location.html
@@ -1 +1 @@
-<!doctype html><html><head><meta charset='utf-8'><title>Papers with no location</title><link rel='stylesheet' href='reports.css'></head><body><h2>Papers with no location</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td></tr><tr><td>50_people_one_question</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose Estimates Across Space and Time</td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a database for facial expression, valence, and arousal computing in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d818568838433a6d6831adde49a58cef05e0c89f</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, 
metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>appa_real</td><td>APPA-REAL</td><td>Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</td><td>Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=apparent and real age estimation in still images with deep residual regressors on appa-real database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>633c851ebf625ad7abdda2324e9de093cf623141</td></tr><tr><td>appa_real</td><td>APPA-REAL</td><td>From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</td><td>From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>The AR face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td></tr><tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr><tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr><tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and 
Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr><tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr><tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr><tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr><tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human behavior analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr><tr><td>bpad</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute classification</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>End-to-End People Detection in Crowded Scenes</td><td>End-to-End People Detection in Crowded Scenes</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end people detection in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td></tr><tr><td>bu_3dfe</td><td>BU-3DFE</td><td>A 3D Facial Expression Database For Facial Behavior Research</td><td>A 3D facial expression database for facial behavior research</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=a 3d facial expression database for facial behavior research&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td></tr><tr><td>cacd</td><td></td><td>Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval</td><td>Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval</td><td><a href="https://pdfs.semanticscholar.org/c44c/84540db1c38ace232ef34b03bda1c81ba039.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=cross-age reference coding for age-invariant face recognition and retrieval&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c44c84540db1c38ace232ef34b03bda1c81ba039</td></tr><tr><td>cafe</td><td>#N/A</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: validity and reliability from untrained adults&sort=relevance" target="_blank">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr><tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for learning of object categories</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance" target="_blank">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr><tr><td>caltech_crp</td><td>Caltech CRP</td><td>Fine-grained classification of pedestrians in video: Benchmark and state of the art</td><td>Fine-grained classification of pedestrians in video: Benchmark and state of the art</td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained classification of pedestrians in video: benchmark and state of the art&sort=relevance" target="_blank">[s2]</a></td><td></td><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian detection: A benchmark</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td></tr><tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>casia_webface</td><td>CASIA 
Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance" target="_blank">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr><tr><td>celeba</td><td>CelebA</td><td>Deep Learning Face Attributes in the Wild</td><td>Deep Learning Face Attributes in the Wild</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face attributes in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: A Review of Events and Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr><tr><td>chokepoint</td><td>ChokePoint</td><td>Patch-based Probabilistic Image Quality Assessment for Face Selection and Improved Video-based Face Recognition</td><td>Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=patch-based probabilistic image quality assessment for face selection and improved video-based face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td></tr><tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image segmentation and labeling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr><tr><td>cmdp</td><td>CMDP</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=distance estimation of an unknown person from a portrait&sort=relevance" target="_blank">[s2]</a></td><td></td><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO: Common Objects in Context</td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td></tr><tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr><tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr><tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr><tr><td>cohn_kanade</td><td>CK</td><td>Comprehensive Database for Facial Expression Analysis</td><td>Comprehensive Database for Facial Expression Analysis</td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=comprehensive database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td></tr><tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance" target="_blank">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr><tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr><tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>Locally Aligned 
Feature Transforms across Views</td><td>Locally Aligned Feature Transforms across Views</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance" target="_blank">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr><tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>ufi</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>data_61</td><td>Data61 Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>Fashion Landmark Detection in the Wild</td><td>Fashion Landmark Detection in the Wild</td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fashion landmark detection in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: cross-distance and cross-spectral matching&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=improving person re-identification by attribute and identity learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</td><td>Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unlabeled samples generated by gan improve the person re-identification baseline in vitro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Tracking Multiple People Online and in Real Time</td><td>Tracking Multiple People Online and in Real Time</td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=tracking multiple people online and in real time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td></tr><tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr><tr><td>erce</td><td>ERCe</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=video synopsis by heterogeneous multi-source correlation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6c293f0420f7e945b5916ae44269fb53e139275</td></tr><tr><td>erce</td><td>ERCe</td><td>Learning from Multiple Sources for Video Summarisation</td><td>Learning from Multiple Sources for Video Summarisation</td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning from multiple sources for video summarisation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td></tr><tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>72a155c987816ae81c858fddbd6beab656d86220</td></tr><tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>face_scrub</td><td>FaceScrub</td><td>A data-driven approach to cleaning large face datasets</td><td>A data-driven approach to cleaning large face datasets</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a data-driven approach to cleaning large face datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large 
collections of images with faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>Face Swapping: Automatically Replacing Faces in Photographs</td><td>Face swapping: automatically replacing faces in photographs</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=face swapping: automatically replacing faces in photographs&sort=relevance" target="_blank">[s2]</a></td><td></td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>FDDB: A benchmark for face detection in unconstrained settings</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance" target="_blank">[s2]</a></td><td></td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>A new ranking method for principal components analysis and its application to face image analysis</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr><tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr><tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep networks for facial expression recognition with crowd-sourced label distribution</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression recognition with crowd-sourced label distribution&sort=relevance" target="_blank">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr><tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr><tr><td>fiw_300</td><td>300-W</td><td>A semi-automatic methodology for facial landmark annotation</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a semi-automatic methodology for facial landmark annotation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge</td><td>300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>300 Faces In-The-Wild Challenge: database and results</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e4754afaa15b1b53e70743880484b8d0736990ff</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>FACE2GPS: Estimating geographic location from facial features</td><td>Exploring the geo-dependence of human face appearance</td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face2gps: estimating geographic location from facial features&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>Large-scale geo-facial image analysis</td><td>Large-scale geo-facial image analysis</td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large-scale geo-facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4af89578ac237278be310f7660a408b03f12d603</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>Exploring the Geo-Dependence of Human Face Appearance</td><td>Exploring the geo-dependence of human face appearance</td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring the geo-dependence of human face appearance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>GeoFaceExplorer: Exploring the Geo-Dependence of Facial Attributes</td><td>GeoFaceExplorer: exploring the geo-dependence of facial attributes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=geofaceexplorer: exploring the geo-dependence of facial attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>17b46e2dad927836c689d6787ddb3387c6159ece</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>gfw</td><td>Grouping Face in the Wild</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merge or not? 
learning to group faces via imitation learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c91808994a250d7be332400a534a9291ca3b60e</td></tr><tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D human pose annotations</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>The HDA+ Data Set for Research on Fully Automated Re-identification Systems</td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>helen</td><td>Helen</td><td>Interactive Facial Feature Localization</td><td>Interactive Facial Feature Localization</td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=interactive facial feature localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>95f12d27c3b4914e0668a268360948bce92f7db3</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td></tr><tr><td>hipsterwars</td><td>Hipsterwars</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hipster wars: discovering 
elements of fashion styles&sort=relevance" target="_blank">[s2]</a></td><td></td><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td></tr><tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? a preliminary study of face-based recognition on a transgender dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td></tr><tr><td>ibm_dif</td><td>IBM Diversity in Faces</td><td>Diversity in Faces</td><td>Facial Coding Scheme Reference 1 Craniofacial Distances</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=diversity in faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear Imaging</td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance" target="_blank">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr><tr><td>ijb_b</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr><tr><td>ijb_a</td><td>IJB-A</td><td>Pushing the Frontiers of Unconstrained Face 
Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=pushing the frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td>Imagery Library for Intelligent Detection Systems: The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td>Person Re-Identification by Video Ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identi cation by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>imdb_face</td><td>IMDb Face</td><td>The Devil of Face Recognition is in the Noise</td><td>The Devil of Face Recognition is in the Noise</td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the devil of face recognition is in the noise&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>Deep expectation of real and apparent age from a single image without facial landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr><tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=multi-task recurrent neural 
network for immediacy prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr><tr><td>imsitu</td><td>imSitu</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=situation recognition: visual semantic role labeling for image understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>51eba481dac6b229a7490f650dff7b17ce05df73</td></tr><tr><td>jaffe</td><td>JAFFE</td><td>Coding Facial Expressions with Gabor Wavelets</td><td>Coding Facial Expressions with Gabor Wavelets</td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=coding facial expressions with gabor wavelets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td></tr><tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Kinship Verification through Transfer Learning</td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>Vision meets robotics: The KITTI dataset</td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td></tr><tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr><tr><td>laofiw</td><td>LAOFIW</td><td>Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</td><td>Turning a Blind Eye: Explicit Removal of Biases and Variation 
from Deep Neural Network Embeddings</td><td><a href="https://arxiv.org/pdf/1809.02169.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=turning a blind eye: explicit removal of biases and variation from deep neural network embeddings&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4eab317b5ac436a949849ed286baa3de2a541eef</td></tr><tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person search&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr><tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>lfpw</td><td>LFWP</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=localizing parts of faces using a consensus of exemplars&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140438a77a771a8fb656b39a78ff488066eb6b50</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild: A Survey</td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and 
Learned Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by combining multiple descriptors and learned background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>XM2VTSDB : The extended M2VTS database</td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td></tr><tr><td>mafa</td><td>MAsked FAces</td><td>Detecting Masked Faces in the Wild with LLE-CNNs</td><td>Detecting Masked Faces in the Wild with LLE-CNNs</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=detecting masked faces in the wild with lle-cnns&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr><tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance" target="_blank">[s2]</a></td><td></td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=improving person re-identification by attribute and 
identity learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A Benchmark</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td></tr><tr><td>megaage</td><td>MegaAge</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=quantifying facial age by posterior of age comparisons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td></tr><tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for Million Scale Face Recognition</td><td>Level Playing Field for Million Scale Face Recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr><tr><td>megaface</td><td>MegaFace</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the megaface benchmark: 1 million faces for recognition at scale&sort=relevance" target="_blank">[s2]</a></td><td></td><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td></tr><tr><td>mit_cbcl</td><td>MIT 
CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance" target="_blank">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr><tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr><tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time dataset: one million videos for event understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td></tr><tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2258e01865367018ed6f4262c880df85b94959f8</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression 
Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpii_gaze</td><td>MPIIGaze</td><td>Appearance-based Gaze Estimation in the Wild</td><td>Appearance-based gaze estimation in the wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=appearance-based gaze estimation in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td></tr><tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of the art analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world person re-identification using a drone&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr><tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark for large-scale face recognition&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-identification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>names_and_faces</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Crowdsourcing facial expressions for affective-interaction</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>pa_100k</td><td>PA-100K</td><td>HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</td><td>HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hydraplus-net: attentive deep features for pedestrian analysis&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td></tr><tr><td>penn_fudan</td><td>Penn Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining Recognition and Segmentation</td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far Distance</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the Unexpected: Training Detectors for 
Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>prw</td><td>PRW</td><td>Person Re-identification in the Wild</td><td>Person Re-identification in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0b84f07af44f964817675ad961def8a51406dd2e</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>pubfig</td><td>PubFig</td><td>Attribute and Simile Classifiers for Face Verification</td><td>Attribute and simile classifiers for face verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=attribute and simile classifiers for face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>The put face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent 
Re-identification in a Camera Network</td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d body model for people tracking and re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>SCface – surveillance cameras face database</td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td></tr><tr><td>scut_fbp</td><td>SCUT-FBP</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scut-fbp: a benchmark dataset for facial beauty perception&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and 
cascaded multi-scale architecture&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Local Descriptors Encoded by Fisher Vectors for Person Re-identification</td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>Learning Social Relation Traits from Face Images</td><td>Learning Social Relation Traits from Face Images</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait Database</td><td>On a Large Sequence-Based Human Gait Database</td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>sports_videos_in_the_wild</td><td>SVW</td><td>Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis</td><td>Sports Videos in the Wild (SVW): A video dataset for sports analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sports videos in the wild (svw): a video dataset for sports analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>stanford_drone</td><td>Stanford 
Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Social LSTM: Human Trajectory Prediction in Crowded Spaces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Texas 3D Face Recognition Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>tisi</td><td>Times Square Intersection</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=video synopsis by heterogeneous multi-source correlation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6c293f0420f7e945b5916ae44269fb53e139275</td></tr><tr><td>tisi</td><td>Times Square Intersection</td><td>Learning from Multiple Sources for Video Summarisation</td><td>Learning from Multiple Sources for Video Summarisation</td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning from multiple sources for video summarisation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td></tr><tr><td>oxford_town_centre</td><td>TownCentre</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time surveillance video&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_motionpairs</td><td>TUD-Motionparis</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>uccs</td><td>UCCS</td><td>Large scale unconstrained open set face database</td><td>Large scale unconstrained open set face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large scale unconstrained open set face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td></tr><tr><td>uccs</td><td>UCCS</td><td>Unconstrained Face Detection and Open-Set Face Recognition Challenge</td><td>Unconstrained Face Detection and Open-Set Face Recognition Challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained face detection and open-set face recognition challenge&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td></tr><tr><td>ucf_101</td><td>UCF101</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ucf101: a dataset of 101 human actions classes from videos in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>ufdd</td><td>UFDD</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the limits of unconstrained face detection: a challenge dataset and baseline results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3531332efe19be21e7401ba1f04570a142617236</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face dataset for training deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>urban_tribes</td><td>Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=from bikers to surfers: visual recognition of urban tribes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>774cbb45968607a027ae4729077734db000a1ec5</td></tr><tr><td>vgg_celebs_in_places</td><td>CIP</td><td>Faces in Places: Compound Query Retrieval</td><td>Faces in Places: compound query retrieval</td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=faces in places: compound query retrieval&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7ebb153704706e457ab57b432793d2b6e5d12592</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>vgg_faces2</td><td>VGG Face2</td><td>VGGFace2: A dataset for recognising faces across pose and age</td><td>VGGFace2: A Dataset for Recognising Faces across Pose and Age</td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vggface2: a dataset for recognising faces across pose and age&sort=relevance" target="_blank">[s2]</a></td><td></td><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes (VOC) Challenge</td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>wider</td><td>WIDER</td><td>Recognize Complex Events from Static Images by Fusing Deep Channels</td><td>Recognize complex events from static images by fusing deep channels</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognize complex events from static images by fusing deep channels&sort=relevance" target="_blank">[s2]</a></td><td></td><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td></tr><tr><td>wider_attribute</td><td>WIDER Attribute</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td>Human Attribute Recognition by Deep 
Hierarchical Contexts</td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human attribute recognition by deep hierarchical contexts&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44d23df380af207f5ac5b41459c722c87283e1eb</td></tr><tr><td>wider_face</td><td>WIDER FACE</td><td>WIDER FACE: A Face Detection Benchmark</td><td>WIDER FACE: A Face Detection Benchmark</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wider face: a face detection benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td></tr><tr><td>wlfdb</td><td>WLFDB</td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB : Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>Acquiring linear subspaces for face recognition under variable lighting</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>YFCC100M: the new data in multimedia research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance" target="_blank">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>youtube_faces</td><td>YouTubeFaces</td><td>Face Recognition in Unconstrained Videos with Matched Background Similarity</td><td>Face recognition in unconstrained videos with matched background similarity</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition in unconstrained videos with matched background similarity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=personalizing human video pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr></table></body></html> \ No newline at end of file
+<!doctype html><html><head><meta charset='utf-8'><title>Papers with no location</title><link rel='stylesheet' href='reports.css'></head><body><h2>Papers with no location</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td></tr><tr><td>fpoq</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose Estimates Across Space and Time</td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a database for facial expression, valence, and arousal computing in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d818568838433a6d6831adde49a58cef05e0c89f</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, 
metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>appa_real</td><td>APPA-REAL</td><td>Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</td><td>Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=apparent and real age estimation in still images with deep residual regressors on appa-real database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>633c851ebf625ad7abdda2324e9de093cf623141</td></tr><tr><td>appa_real</td><td>APPA-REAL</td><td>From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</td><td>From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>The AR face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td></tr><tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr><tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr><tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and 
Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr><tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr><tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr><tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr><tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human behavior analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr><tr><td>bpad</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute classification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>End-to-End People Detection in Crowded Scenes</td><td>End-to-End People Detection in Crowded Scenes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end people detection in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td></tr><tr><td>bu_3dfe</td><td>BU-3DFE</td><td>A 3D Facial Expression Database For Facial Behavior Research</td><td>A 3D facial expression database for facial behavior research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d facial expression database for facial behavior research&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td></tr><tr><td>cacd</td><td></td><td>Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval</td><td>Cross-Age Reference Coding for Age-Invariant Face Recognition and Retrieval</td><td><a href="https://pdfs.semanticscholar.org/c44c/84540db1c38ace232ef34b03bda1c81ba039.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=cross-age reference coding for age-invariant face recognition and retrieval&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c44c84540db1c38ace232ef34b03bda1c81ba039</td></tr><tr><td>cafe</td><td>#N/A</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: validity and reliability from untrained adults&sort=relevance" target="_blank">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr><tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for learning of object categories</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance" target="_blank">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr><tr><td>caltech_crp</td><td>Caltech CRP</td><td>Fine-grained classification of pedestrians in video: Benchmark and state of the art</td><td>Fine-grained classification of pedestrians in video: Benchmark and state of the art</td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained classification of pedestrians in video: benchmark and state of the art&sort=relevance" target="_blank">[s2]</a></td><td></td><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian detection: A benchmark</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td></tr><tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>casia_webface</td><td>CASIA 
Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance" target="_blank">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr><tr><td>celeba</td><td>CelebA</td><td>Deep Learning Face Attributes in the Wild</td><td>Deep Learning Face Attributes in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face attributes in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: A Review of Events and Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr><tr><td>chokepoint</td><td>ChokePoint</td><td>Patch-based Probabilistic Image Quality Assessment for Face Selection and Improved Video-based Face Recognition</td><td>Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=patch-based probabilistic image quality assessment for face selection and improved video-based face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td></tr><tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image segmentation and labeling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr><tr><td>cmdp</td><td>CMDP</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=distance estimation of an unknown person from a portrait&sort=relevance" target="_blank">[s2]</a></td><td></td><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO: Common Objects in Context</td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td></tr><tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr><tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr><tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr><tr><td>cohn_kanade</td><td>CK</td><td>Comprehensive Database for Facial Expression Analysis</td><td>Comprehensive Database for Facial Expression Analysis</td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=comprehensive database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td></tr><tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance" target="_blank">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr><tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr><tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>Locally Aligned 
Feature Transforms across Views</td><td>Locally Aligned Feature Transforms across Views</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance" target="_blank">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr><tr><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>ufi</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>data_61</td><td>Data61 Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>Fashion Landmark Detection in the Wild</td><td>Fashion Landmark Detection in the Wild</td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fashion landmark detection in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: cross-distance and cross-spectral matching&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=improving person re-identification by attribute and identity learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</td><td>Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unlabeled samples generated by gan improve the person re-identification baseline in vitro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Tracking Multiple People Online and in Real Time</td><td>Tracking Multiple People Online and in Real Time</td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=tracking multiple people online and in real time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td></tr><tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr><tr><td>erce</td><td>ERCe</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=video synopsis by heterogeneous multi-source correlation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6c293f0420f7e945b5916ae44269fb53e139275</td></tr><tr><td>erce</td><td>ERCe</td><td>Learning from Multiple Sources for Video Summarisation</td><td>Learning from Multiple Sources for Video Summarisation</td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning from multiple sources for video summarisation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td></tr><tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>72a155c987816ae81c858fddbd6beab656d86220</td></tr><tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>face_scrub</td><td>FaceScrub</td><td>A data-driven approach to cleaning large face datasets</td><td>A data-driven approach to cleaning large face datasets</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a data-driven approach to cleaning large face datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large 
collections of images with faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>Face Swapping: Automatically Replacing Faces in Photographs</td><td>Face swapping: automatically replacing faces in photographs</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face swapping: automatically replacing faces in photographs&sort=relevance" target="_blank">[s2]</a></td><td></td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>FDDB: A benchmark for face detection in unconstrained settings</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance" target="_blank">[s2]</a></td><td></td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>A new ranking method for principal components analysis and its application to face image analysis</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr><tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr><tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep networks for facial expression recognition with crowd-sourced label distribution</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression recognition with crowd-sourced label distribution&sort=relevance" target="_blank">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr><tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr><tr><td>fiw_300</td><td>300-W</td><td>A semi-automatic methodology for facial landmark annotation</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a semi-automatic methodology for facial landmark annotation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge</td><td>300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>300 Faces In-The-Wild Challenge: database and results</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e4754afaa15b1b53e70743880484b8d0736990ff</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>FACE2GPS: Estimating geographic location from facial features</td><td>Exploring the geo-dependence of human face appearance</td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face2gps: estimating geographic location from facial features&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>Large-scale geo-facial image analysis</td><td>Large-scale geo-facial image analysis</td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large-scale geo-facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4af89578ac237278be310f7660a408b03f12d603</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>Exploring the Geo-Dependence of Human Face Appearance</td><td>Exploring the geo-dependence of human face appearance</td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring the geo-dependence of human face appearance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>GeoFaceExplorer: Exploring the Geo-Dependence of Facial Attributes</td><td>GeoFaceExplorer: exploring the geo-dependence of facial attributes</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=geofaceexplorer: exploring the geo-dependence of facial attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>17b46e2dad927836c689d6787ddb3387c6159ece</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>gfw</td><td>Grouping Face in the Wild</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merge or not? 
learning to group faces via imitation learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c91808994a250d7be332400a534a9291ca3b60e</td></tr><tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D human pose annotations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>The HDA+ Data Set for Research on Fully Automated Re-identification Systems</td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>helen</td><td>Helen</td><td>Interactive Facial Feature Localization</td><td>Interactive Facial Feature Localization</td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=interactive facial feature localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>95f12d27c3b4914e0668a268360948bce92f7db3</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td></tr><tr><td>hipsterwars</td><td>Hipsterwars</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hipster wars: discovering 
elements of fashion styles&sort=relevance" target="_blank">[s2]</a></td><td></td><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td></tr><tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? a preliminary study of face-based recognition on a transgender dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td></tr><tr><td>ibm_dif</td><td>IBM Diversity in Faces</td><td>Diversity in Faces</td><td>Facial Coding Scheme Reference 1 Craniofacial Distances</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=diversity in faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>iit_dehli_ear</td><td>IIT Delhi Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear Imaging</td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance" target="_blank">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr><tr><td>ijb_b</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr><tr><td>ijb_a</td><td>IJB-A</td><td>Pushing the Frontiers of Unconstrained Face 
Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=pushing the frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td>Imagery Library for Intelligent Detection Systems: The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td>Person Re-Identification by Video Ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>imdb_face</td><td>IMDb Face</td><td>The Devil of Face Recognition is in the Noise</td><td>The Devil of Face Recognition is in the Noise</td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the devil of face recognition is in the noise&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>Deep expectation of real and apparent age from a single image without facial landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr><tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=multi-task recurrent neural 
network for immediacy prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr><tr><td>imsitu</td><td>imSitu</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=situation recognition: visual semantic role labeling for image understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>51eba481dac6b229a7490f650dff7b17ce05df73</td></tr><tr><td>jaffe</td><td>JAFFE</td><td>Coding Facial Expressions with Gabor Wavelets</td><td>Coding Facial Expressions with Gabor Wavelets</td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=coding facial expressions with gabor wavelets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td></tr><tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Kinship Verification through Transfer Learning</td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>Vision meets robotics: The KITTI dataset</td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td></tr><tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr><tr><td>laofiw</td><td>LAOFIW</td><td>Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</td><td>Turning a Blind Eye: Explicit Removal of Biases and Variation 
from Deep Neural Network Embeddings</td><td><a href="https://arxiv.org/pdf/1809.02169.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=turning a blind eye: explicit removal of biases and variation from deep neural network embeddings&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4eab317b5ac436a949849ed286baa3de2a541eef</td></tr><tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person search&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr><tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>lfpw</td><td>LFPW</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=localizing parts of faces using a consensus of exemplars&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140438a77a771a8fb656b39a78ff488066eb6b50</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild: A Survey</td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and 
Learned Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by combining multiple descriptors and learned background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>XM2VTSDB : The extended M2VTS database</td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td></tr><tr><td>mafa</td><td>MAsked FAces</td><td>Detecting Masked Faces in the Wild with LLE-CNNs</td><td>Detecting Masked Faces in the Wild with LLE-CNNs</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=detecting masked faces in the wild with lle-cnns&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr><tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance" target="_blank">[s2]</a></td><td></td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td>Improving Person Re-identification by Attribute and Identity Learning</td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=improving person re-identification by attribute and 
identity learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A Benchmark</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td></tr><tr><td>megaage</td><td>MegaAge</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=quantifying facial age by posterior of age comparisons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td></tr><tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for Million Scale Face Recognition</td><td>Level Playing Field for Million Scale Face Recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr><tr><td>megaface</td><td>MegaFace</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the megaface benchmark: 1 million faces for recognition at scale&sort=relevance" target="_blank">[s2]</a></td><td></td><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td></tr><tr><td>mit_cbcl</td><td>MIT 
CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance" target="_blank">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr><tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr><tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time dataset: one million videos for event understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td></tr><tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2258e01865367018ed6f4262c880df85b94959f8</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression 
Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpii_gaze</td><td>MPIIGaze</td><td>Appearance-based Gaze Estimation in the Wild</td><td>Appearance-based gaze estimation in the wild</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=appearance-based gaze estimation in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td></tr><tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of the art analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world person re-identification using a drone&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr><tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark for large-scale face recognition&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-identification</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>names_and_faces</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Crowdsourcing facial expressions for affective-interaction</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>pa_100k</td><td>PA-100K</td><td>HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</td><td>HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=hydraplus-net: attentive deep features for pedestrian analysis&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td></tr><tr><td>penn_fudan</td><td>Penn Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining Recognition and Segmentation</td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far Distance</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the Unexpected: Training Detectors for 
Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>prw</td><td>PRW</td><td>Person Re-identification in the Wild</td><td>Person Re-identification in the Wild</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0b84f07af44f964817675ad961def8a51406dd2e</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>pubfig</td><td>PubFig</td><td>Attribute and Simile Classifiers for Face Verification</td><td>Attribute and simile classifiers for face verification</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=attribute and simile classifiers for face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>The put face database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent 
Re-identification in a Camera Network</td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d body model for people tracking and re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>SCface – surveillance cameras face database</td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td></tr><tr><td>scut_fbp</td><td>SCUT-FBP</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=scut-fbp: a benchmark dataset for facial beauty perception&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and 
cascaded multi-scale architecture&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Local Descriptors Encoded by Fisher Vectors for Person Re-identification</td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>Learning Social Relation Traits from Face Images</td><td>Learning Social Relation Traits from Face Images</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait Database</td><td>On a Large Sequence-Based Human Gait Database</td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>sports_videos_in_the_wild</td><td>SVW</td><td>Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis</td><td>Sports Videos in the Wild (SVW): A video dataset for sports analysis</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=sports videos in the wild (svw): a video dataset for sports analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>stanford_drone</td><td>Stanford 
Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Social LSTM: Human Trajectory Prediction in Crowded Spaces</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Texas 3D Face Recognition Database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>tisi</td><td>Times Square Intersection</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td>Video Synopsis by Heterogeneous Multi-source Correlation</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=video synopsis by heterogeneous multi-source correlation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6c293f0420f7e945b5916ae44269fb53e139275</td></tr><tr><td>tisi</td><td>Times Square Intersection</td><td>Learning from Multiple Sources for Video Summarisation</td><td>Learning from Multiple Sources for Video Summarisation</td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning from multiple sources for video summarisation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td></tr><tr><td>oxford_town_centre</td><td>TownCentre</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time surveillance video&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_motionpairs</td><td>TUD-Motionpairs</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>uccs</td><td>UCCS</td><td>Large scale unconstrained open set face database</td><td>Large scale unconstrained open set face database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=large scale unconstrained open set face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td></tr><tr><td>uccs</td><td>UCCS</td><td>Unconstrained Face Detection and Open-Set Face Recognition Challenge</td><td>Unconstrained Face Detection and Open-Set Face Recognition Challenge</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained face detection and open-set face recognition challenge&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td></tr><tr><td>ucf_101</td><td>UCF101</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ucf101: a dataset of 101 human actions classes from videos in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>ufdd</td><td>UFDD</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the limits of unconstrained face detection: a challenge dataset and baseline results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3531332efe19be21e7401ba1f04570a142617236</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face dataset for training deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>urban_tribes</td><td>Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=from bikers to surfers: visual recognition of urban tribes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>774cbb45968607a027ae4729077734db000a1ec5</td></tr><tr><td>vgg_celebs_in_places</td><td>CIP</td><td>Faces in Places: Compound Query Retrieval</td><td>Faces in Places: compound query retrieval</td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=faces in places: compound query retrieval&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7ebb153704706e457ab57b432793d2b6e5d12592</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>vgg_faces2</td><td>VGG Face2</td><td>VGGFace2: A dataset for recognising faces across pose and age</td><td>VGGFace2: A Dataset for Recognising Faces across Pose and Age</td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vggface2: a dataset for recognising faces across pose and age&sort=relevance" target="_blank">[s2]</a></td><td></td><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes (VOC) Challenge</td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>wider</td><td>WIDER</td><td>Recognize Complex Events from Static Images by Fusing Deep Channels</td><td>Recognize complex events from static images by fusing deep channels</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=recognize complex events from static images by fusing deep channels&sort=relevance" target="_blank">[s2]</a></td><td></td><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td></tr><tr><td>wider_attribute</td><td>WIDER Attribute</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td>Human Attribute Recognition by Deep 
Hierarchical Contexts</td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human attribute recognition by deep hierarchical contexts&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44d23df380af207f5ac5b41459c722c87283e1eb</td></tr><tr><td>wider_face</td><td>WIDER FACE</td><td>WIDER FACE: A Face Detection Benchmark</td><td>WIDER FACE: A Face Detection Benchmark</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=wider face: a face detection benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td></tr><tr><td>wlfdb</td><td>WLFDB</td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB : Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>Acquiring linear subspaces for face recognition under variable lighting</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>YFCC100M: the new data in multimedia research</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance" target="_blank">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>youtube_faces</td><td>YouTubeFaces</td><td>Face Recognition in Unconstrained Videos with Matched Background Similarity</td><td>Face recognition in unconstrained videos with matched background similarity</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=face recognition in unconstrained videos with matched background similarity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=personalizing human video pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/reports/report_coverage.html b/scraper/reports/report_coverage.html
index d5e81bb0..df689fdf 100644
--- a/scraper/reports/report_coverage.html
+++ b/scraper/reports/report_coverage.html
@@ -1 +1 @@
-<!doctype html><html><head><meta charset='utf-8'><title>Coverage</title><link rel='stylesheet' href='reports.css'></head><body><h2>Coverage</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Country</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html" target="_blank">Face detection, pose estimation, and landmark localization in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>999</td><td>649</td><td>350</td><td>44</td><td>576</td><td>422</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html" target="_blank">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>999</td><td>643</td><td>356</td><td>56</td><td>628</td><td>362</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html" target="_blank">Deep Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>999</td><td>629</td><td>370</td><td>51</td><td>558</td><td>429</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html" target="_blank">Labeled Faces in the Wild: A Database forStudying Face Recognition in Unconstrained Environments</a></td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>999</td><td>617</td><td>382</td><td>63</td><td>598</td><td>382</td></tr><tr><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td><td>coco</td><td>COCO</td><td><a href="papers/5e0f8c355a37a5a89351c02f174e7a5ddcb98683.html" target="_blank">Microsoft COCO: Common Objects in Context</a></td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>606</td><td>393</td><td>25</td><td>722</td><td>259</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html" target="_blank">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td>edu</td><td>University of Pittsburgh</td><td>United 
States</td><td>40.44415295</td><td>-79.96243993</td><td>60%</td><td>999</td><td>604</td><td>395</td><td>58</td><td>470</td><td>518</td></tr><tr><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td><td>kitti</td><td>KITTI</td><td><a href="papers/026e3363b7f76b51cc711886597a44d5f1fd1de2.html" target="_blank">Vision meets robotics: The KITTI dataset</a></td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td>I. J. Robotics Res.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>999</td><td>602</td><td>397</td><td>36</td><td>553</td><td>462</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html" target="_blank">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>60%</td><td>999</td><td>597</td><td>402</td><td>70</td><td>526</td><td>466</td></tr><tr><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td><td>voc</td><td>VOC</td><td><a href="papers/0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a.html" target="_blank">The Pascal Visual Object Classes (VOC) Challenge</a></td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>60%</td><td>999</td><td>596</td><td>402</td><td>30</td><td>557</td><td>422</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html" target="_blank">Attribute and simile classifiers for face verification</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>914</td><td>574</td><td>340</td><td>48</td><td>586</td><td>316</td></tr><tr><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/6d96f946aaabc734af7fe3fc4454cf8547fcd5ed.html" target="_blank">The AR face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>999</td><td>573</td><td>426</td><td>59</td><td>458</td><td>530</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html" target="_blank">80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>999</td><td>573</td><td>426</td><td>89</td><td>644</td><td>337</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html" target="_blank">Histograms of oriented gradients for human detection</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>INRIA 
Rhone-Alps, Montbonnot, France</td><td>France</td><td>45.21788600</td><td>5.80736900</td><td>57%</td><td>999</td><td>570</td><td>429</td><td>43</td><td>419</td><td>509</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html" target="_blank">Overview of the face recognition grand challenge</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>NIST</td><td>United States</td><td>39.14004000</td><td>-77.21850600</td><td>57%</td><td>999</td><td>565</td><td>433</td><td>86</td><td>549</td><td>442</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html" target="_blank">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>999</td><td>560</td><td>439</td><td>67</td><td>498</td><td>462</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html" target="_blank">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>554</td><td>445</td><td>94</td><td>495</td><td>491</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html" target="_blank">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>551</td><td>448</td><td>70</td><td>540</td><td>439</td></tr><tr><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/b62628ac06bbac998a3ab825324a41a11bc3a988.html" target="_blank">XM2VTSDB : The extended M2VTS database</a></td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>864</td><td>527</td><td>337</td><td>39</td><td>493</td><td>404</td></tr><tr><td>dc8b25e35a3acb812beb499844734081722319b4</td><td>feret</td><td>FERET</td><td><a href="papers/dc8b25e35a3acb812beb499844734081722319b4.html" target="_blank">The FERET database and evaluation procedure for face-recognition algorithms</a></td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>516</td><td>483</td><td>103</td><td>591</td><td>421</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html" target="_blank">Deep Learning Face Attributes in the 
Wild</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>56%</td><td>919</td><td>514</td><td>404</td><td>62</td><td>694</td><td>201</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html" target="_blank">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>899</td><td>508</td><td>391</td><td>51</td><td>431</td><td>451</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html" target="_blank">Parameterisation of a stochastic model for human face identification</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>999</td><td>500</td><td>499</td><td>94</td><td>543</td><td>427</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>446</td><td>313</td><td>50</td><td>404</td><td>345</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>446</td><td>313</td><td>50</td><td>404</td><td>345</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html" target="_blank">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of California, Santa Cruz</td><td>United States</td><td>36.99158470</td><td>-122.05827710</td><td>66%</td><td>624</td><td>414</td><td>210</td><td>33</td><td>342</td><td>276</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html" target="_blank">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><span class="gray">[pdf]</span></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>568</td><td>411</td><td>157</td><td>19</td><td>320</td><td>235</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html" target="_blank">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><span class="gray">[pdf]</span></td><td>2009 IEEE 12th 
International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>716</td><td>407</td><td>309</td><td>60</td><td>492</td><td>222</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html" target="_blank">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>632</td><td>366</td><td>264</td><td>44</td><td>358</td><td>264</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html" target="_blank">Scalable Person Re-identification: A Benchmark</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>77%</td><td>460</td><td>354</td><td>106</td><td>9</td><td>263</td><td>185</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html" target="_blank">Face recognition in unconstrained videos with matched background similarity</a></td><td><span class="gray">[pdf]</span></td><td>CVPR 2011</td><td>edu</td><td>Tel Aviv University</td><td>Israel</td><td>32.11198890</td><td>34.80459702</td><td>66%</td><td>509</td><td>338</td><td>170</td><td>24</td><td>294</td><td>216</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html" target="_blank">Learning Face Representation from Scratch</a></td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese Academy of Sciences</td><td>China</td><td>40.00447950</td><td>116.37023800</td><td>70%</td><td>476</td><td>331</td><td>145</td><td>20</td><td>290</td><td>182</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</span></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</span></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</span></td><td>2008 IEEE Conference on 
Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html" target="_blank">A 3D facial expression database for facial behavior research</a></td><td><span class="gray">[pdf]</span></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>588</td><td>313</td><td>274</td><td>45</td><td>306</td><td>282</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html" target="_blank">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>511</td><td>281</td><td>230</td><td>50</td><td>329</td><td>182</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html" target="_blank">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>386</td><td>263</td><td>123</td><td>23</td><td>204</td><td>180</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>407</td><td>259</td><td>148</td><td>18</td><td>252</td><td>153</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>407</td><td>259</td><td>148</td><td>18</td><td>252</td><td>153</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html" target="_blank">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>429</td><td>253</td><td>176</td><td>38</td><td>198</td><td>234</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html" target="_blank">Web-based database for facial expression analysis</a></td><td><span class="gray">[pdf]</span></td><td>2005 IEEE International Conference on Multimedia and 
Expo</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>464</td><td>250</td><td>214</td><td>45</td><td>282</td><td>188</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html" target="_blank">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><span class="gray">[pdf]</span></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>387</td><td>249</td><td>138</td><td>21</td><td>291</td><td>96</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</span></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>North Carolina University</td><td>United States</td><td>34.22398690</td><td>-77.87013250</td><td>55%</td><td>437</td><td>239</td><td>197</td><td>24</td><td>228</td><td>203</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</span></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>North Carolina University</td><td>United States</td><td>34.22398690</td><td>-77.87013250</td><td>55%</td><td>437</td><td>239</td><td>197</td><td>24</td><td>228</td><td>203</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html" target="_blank">FDDB: A benchmark for face detection in unconstrained settings</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>380</td><td>237</td><td>143</td><td>20</td><td>202</td><td>164</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html" target="_blank">Presentation and validation of the Radboud Faces Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>487</td><td>234</td><td>253</td><td>39</td><td>342</td><td>144</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>369</td><td>225</td><td>143</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>369</td><td>225</td><td>143</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>369</td><td>225</td><td>143</td><td>32</td><td>237</td><td>131</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>oxford_town_centre</td><td>TownCentre</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html" target="_blank">Stable multi-target tracking in real-time surveillance video</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Oxford</td><td>United Kingdom</td><td>51.75345380</td><td>-1.25400997</td><td>67%</td><td>328</td><td>221</td><td>107</td><td>13</td><td>186</td><td>140</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html" target="_blank">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>323</td><td>211</td><td>112</td><td>27</td><td>208</td><td>120</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html" target="_blank">Interactive Facial Feature Localization</a></td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Adobe</td><td>United States</td><td>37.33077030</td><td>-121.89409510</td><td>60%</td><td>352</td><td>211</td><td>141</td><td>26</td><td>212</td><td>146</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html" target="_blank">Depth and Appearance for Mobile Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>62%</td><td>324</td><td>202</td><td>122</td><td>26</td><td>193</td><td>127</td></tr><tr><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td><td>mot</td><td>MOT</td><td><a href="papers/5981e6479c3fd4e31644db35d236bfb84ae46514.html" target="_blank">Learning to associate: HybridBoosted multi-target tracker for crowded scene</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of Southern California</td><td>United States</td><td>34.02241490</td><td>-118.28634407</td><td>61%</td><td>326</td><td>200</td><td>125</td><td>22</td><td>190</td><td>137</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html" target="_blank">Bosphorus Database for 3D Face Analysis</a></td><td><a 
href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>352</td><td>196</td><td>156</td><td>17</td><td>162</td><td>188</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html" target="_blank">Robust Face Landmark Estimation under Occlusion</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>60%</td><td>325</td><td>194</td><td>131</td><td>18</td><td>194</td><td>133</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html" target="_blank">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>280</td><td>193</td><td>87</td><td>9</td><td>139</td><td>137</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html" target="_blank">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>60%</td><td>318</td><td>192</td><td>126</td><td>34</td><td>211</td><td>107</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html" target="_blank">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>343</td><td>189</td><td>154</td><td>25</td><td>223</td><td>114</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>60%</td><td>311</td><td>186</td><td>125</td><td>34</td><td>208</td><td>105</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>60%</td><td>311</td><td>186</td><td>125</td><td>34</td><td>208</td><td>105</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports 
Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>285</td><td>182</td><td>103</td><td>13</td><td>197</td><td>93</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>285</td><td>182</td><td>103</td><td>13</td><td>197</td><td>93</td></tr><tr><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/010f0f4929e6a6644fb01f0e43820f91d0fad292.html" target="_blank">YFCC100M: the new data in multimedia research</a></td><td><span class="gray">[pdf]</span></td><td>Commun. ACM</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>64%</td><td>274</td><td>174</td><td>100</td><td>23</td><td>172</td><td>100</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html" target="_blank">Locally Aligned Feature Transforms across Views</a></td><td><span class="gray">[pdf]</span></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>258</td><td>163</td><td>95</td><td>15</td><td>136</td><td>117</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html" target="_blank">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><span class="gray">[pdf]</span></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Brown University</td><td>United States</td><td>41.82686820</td><td>-71.40123146</td><td>60%</td><td>264</td><td>158</td><td>106</td><td>27</td><td>206</td><td>56</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html" target="_blank">Generic object recognition with boosting</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>53%</td><td>293</td><td>155</td><td>138</td><td>16</td><td>195</td><td>97</td></tr><tr><td>6204776d31359d129a582057c2d788a14f8aadeb</td><td>youtube_celebrities</td><td>YouTube Celebrities</td><td><a href="papers/6204776d31359d129a582057c2d788a14f8aadeb.html" target="_blank">Face tracking and recognition with visual constraints in real-world videos</a></td><td><span class="gray">[pdf]</span></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Rutgers University</td><td>United 
States</td><td>40.47913175</td><td>-74.43168868</td><td>56%</td><td>267</td><td>149</td><td>117</td><td>13</td><td>125</td><td>121</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html" target="_blank">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>237</td><td>148</td><td>89</td><td>16</td><td>159</td><td>76</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html" target="_blank">Recognition using visual phrases</a></td><td><span class="gray">[pdf]</span></td><td>CVPR 2011</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United States</td><td>40.11116745</td><td>-88.22587665</td><td>57%</td><td>246</td><td>140</td><td>106</td><td>18</td><td>170</td><td>68</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html" target="_blank">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>225</td><td>139</td><td>86</td><td>17</td><td>146</td><td>77</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>bpad</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html" target="_blank">Describing people: A poselet-based approach to attribute classification</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>230</td><td>138</td><td>92</td><td>14</td><td>163</td><td>66</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html" target="_blank">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>77%</td><td>180</td><td>138</td><td>42</td><td>9</td><td>120</td><td>59</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html" target="_blank">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>197</td><td>133</td><td>64</td><td>15</td><td>108</td><td>88</td></tr><tr><td>0c91808994a250d7be332400a534a9291ca3b60e</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/0c91808994a250d7be332400a534a9291ca3b60e.html" target="_blank">Weak Hypotheses and Boosting for Generic Object Detection and Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>236</td><td>131</td><td>105</td><td>17</td><td>161</td><td>77</td></tr><tr><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/570f37ed63142312e6ccdf00ecc376341ec72b9f.html" target="_blank">Social LSTM: Human Trajectory Prediction in Crowded Spaces</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>224</td><td>127</td><td>97</td><td>3</td><td>140</td><td>81</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html" target="_blank">Exploring Models and Data for Image Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>206</td><td>126</td><td>80</td><td>11</td><td>162</td><td>39</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html" target="_blank">Learning effective human pose estimation from inaccurate annotation</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of 
Leeds</td><td>United Kingdom</td><td>53.80387185</td><td>-1.55245712</td><td>69%</td><td>169</td><td>117</td><td>52</td><td>8</td><td>108</td><td>65</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html" target="_blank">WIDER FACE: A Face Detection Benchmark</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>65%</td><td>178</td><td>116</td><td>62</td><td>12</td><td>112</td><td>66</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html" target="_blank">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>168</td><td>115</td><td>53</td><td>4</td><td>97</td><td>69</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html" target="_blank">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><span class="gray">[pdf]</span></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>Australia</td><td>-35.27769990</td><td>149.11852700</td><td>63%</td><td>181</td><td>114</td><td>67</td><td>8</td><td>87</td><td>97</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html" target="_blank">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><span class="gray">[pdf]</span></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>184</td><td>114</td><td>70</td><td>14</td><td>120</td><td>67</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html" target="_blank">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>176</td><td>113</td><td>63</td><td>2</td><td>113</td><td>62</td></tr><tr><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/22ad2c8c0f4d6aa4328b38d894b814ec22579761.html" target="_blank">Clothing cosegmentation for recognizing people</a></td><td><span class="gray">[pdf]</span></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>63%</td><td>178</td><td>113</td><td>65</td><td>7</td><td>100</td><td>86</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html" target="_blank">Appearance-based gaze estimation in the wild</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE Conference on Computer Vision and Pattern 
Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>149</td><td>108</td><td>41</td><td>3</td><td>94</td><td>54</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw</td><td>LFW</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html" target="_blank">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>183</td><td>107</td><td>76</td><td>14</td><td>103</td><td>77</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html" target="_blank">Age and Gender Estimation of Unfiltered Faces</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>59%</td><td>179</td><td>105</td><td>74</td><td>6</td><td>98</td><td>80</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html" target="_blank">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>139</td><td>105</td><td>34</td><td>5</td><td>100</td><td>37</td></tr><tr><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td><td>scface</td><td>SCface</td><td><a href="papers/29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d.html" target="_blank">SCface – surveillance cameras face database</a></td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td>Multimedia Tools and Applications</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>179</td><td>103</td><td>76</td><td>15</td><td>88</td><td>89</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html" target="_blank">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><span class="gray">[pdf]</span></td><td>Face and Gesture 2011</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>54%</td><td>189</td><td>102</td><td>87</td><td>22</td><td>108</td><td>78</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html" target="_blank">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><span class="gray">[pdf]</span></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>148</td><td>100</td><td>48</td><td>5</td><td>80</td><td>65</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html" target="_blank">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><span class="gray">[pdf]</span></td><td>2013 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>148</td><td>99</td><td>49</td><td>7</td><td>105</td><td>43</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html" target="_blank">Multi-camera activity correlation analysis</a></td><td><span class="gray">[pdf]</span></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>69%</td><td>142</td><td>98</td><td>44</td><td>7</td><td>77</td><td>64</td></tr><tr><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td><td>disfa</td><td>DISFA</td><td><a href="papers/5a5f0287484f0d480fed1ce585dbf729586f0edc.html" target="_blank">DISFA: A Spontaneous Facial Action Intensity Database</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Affective Computing</td><td>edu</td><td>University of Denver</td><td>United States</td><td>39.67665410</td><td>-104.96220300</td><td>52%</td><td>184</td><td>95</td><td>89</td><td>19</td><td>96</td><td>89</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html" target="_blank">On a Large Sequence-Based Human Gait Database</a></td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>150</td><td>95</td><td>55</td><td>17</td><td>103</td><td>51</td></tr><tr><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td><td>fei</td><td>FEI</td><td><a href="papers/8b56e33f33e582f3e473dba573a16b598ed9bcdc.html" target="_blank">A new ranking method for principal components analysis and its application to face image analysis</a></td><td><span class="gray">[pdf]</span></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>169</td><td>93</td><td>76</td><td>6</td><td>69</td><td>102</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html" target="_blank">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>168</td><td>90</td><td>78</td><td>10</td><td>85</td><td>79</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html" target="_blank">A data-driven approach to cleaning large face datasets</a></td><td><span class="gray">[pdf]</span></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>138</td><td>88</td><td>50</td><td>1</td><td>95</td><td>41</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html" target="_blank">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>ETH 
Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>60%</td><td>145</td><td>87</td><td>58</td><td>10</td><td>93</td><td>51</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html" target="_blank">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><span class="gray">[pdf]</span></td><td>CVPR 2011 WORKSHOPS</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>138</td><td>86</td><td>52</td><td>6</td><td>76</td><td>63</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html" target="_blank">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Massachusetts</td><td>United States</td><td>42.38897850</td><td>-72.52869870</td><td>67%</td><td>123</td><td>83</td><td>40</td><td>3</td><td>71</td><td>51</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html" target="_blank">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>133</td><td>82</td><td>51</td><td>9</td><td>73</td><td>58</td></tr><tr><td>5a4df9bef1872865f0b619ac3aacc97f49e4a035</td><td>cuhk_train_station</td><td>CUHK Train Station Dataset</td><td><a href="papers/5a4df9bef1872865f0b619ac3aacc97f49e4a035.html" target="_blank">Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</a></td><td><span class="gray">[pdf]</span></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>58%</td><td>141</td><td>82</td><td>59</td><td>5</td><td>60</td><td>75</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html" target="_blank">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>122</td><td>79</td><td>43</td><td>6</td><td>75</td><td>48</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontaneous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html" target="_blank">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><span class="gray">[pdf]</span></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>United States</td><td>42.08779975</td><td>-75.97066066</td><td>49%</td><td>154</td><td>76</td><td>78</td><td>7</td><td>80</td><td>75</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html" target="_blank">300 Faces In-The-Wild Challenge: database and results</a></td><td><a 
href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>129</td><td>75</td><td>54</td><td>9</td><td>74</td><td>55</td></tr><tr><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/7f23a4bb0c777dd72cca7665a5f370ac7980217e.html" target="_blank">Improving Person Re-identification by Attribute and Identity Learning</a></td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>84%</td><td>87</td><td>73</td><td>14</td><td>0</td><td>43</td><td>42</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html" target="_blank">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>116</td><td>70</td><td>46</td><td>14</td><td>84</td><td>31</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html" target="_blank">Labeled Faces in the Wild: A Survey</a></td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Stevens Institute of Technology</td><td>United States</td><td>40.74225200</td><td>-74.02709490</td><td>61%</td><td>109</td><td>66</td><td>43</td><td>8</td><td>66</td><td>43</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html" target="_blank">Object Detection Combining Recognition and Segmentation</a></td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>105</td><td>64</td><td>41</td><td>9</td><td>58</td><td>43</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html" target="_blank">Pedestrian Attribute Recognition At Far Distance</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>88</td><td>64</td><td>24</td><td>1</td><td>50</td><td>36</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html" target="_blank">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>95</td><td>63</td><td>32</td><td>6</td><td>50</td><td>45</td></tr><tr><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/70c59dc3470ae867016f6ab0e008ac8ba03774a1.html" target="_blank">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" 
target="_blank">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>83</td><td>63</td><td>20</td><td>3</td><td>61</td><td>20</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html" target="_blank">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>95</td><td>61</td><td>34</td><td>4</td><td>59</td><td>35</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>108</td><td>61</td><td>47</td><td>11</td><td>66</td><td>44</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>108</td><td>61</td><td>47</td><td>11</td><td>66</td><td>44</td></tr><tr><td>06f02199690961ba52997cde1527e714d2b3bf8f</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/06f02199690961ba52997cde1527e714d2b3bf8f.html" target="_blank">Gaze locking: passive eye contact detection for human-object interaction</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>76%</td><td>79</td><td>60</td><td>19</td><td>0</td><td>49</td><td>34</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html" target="_blank">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>99</td><td>59</td><td>40</td><td>1</td><td>73</td><td>21</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html" target="_blank">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>City University of New York</td><td>United States</td><td>40.87228250</td><td>-73.89489171</td><td>51%</td><td>115</td><td>59</td><td>56</td><td>8</td><td>75</td><td>37</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html" target="_blank">Understanding Kin Relationships in a Photo</a></td><td><span 
class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>94</td><td>59</td><td>35</td><td>1</td><td>33</td><td>61</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html" target="_blank">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>91</td><td>57</td><td>34</td><td>5</td><td>60</td><td>31</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html" target="_blank">Violent flows: Real-time detection of violent crowd behavior</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>65%</td><td>88</td><td>57</td><td>31</td><td>6</td><td>45</td><td>44</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html" target="_blank">High Five: Recognising human interactions in TV shows</a></td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>98</td><td>56</td><td>42</td><td>10</td><td>66</td><td>28</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html" target="_blank">Automatic 3D face authentication</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>100</td><td>54</td><td>46</td><td>8</td><td>63</td><td>36</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html" target="_blank">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>63%</td><td>84</td><td>53</td><td>31</td><td>4</td><td>51</td><td>33</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html" target="_blank">Person Re-identification in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>77</td><td>52</td><td>25</td><td>1</td><td>47</td><td>27</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html" target="_blank">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><span 
class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td>edu</td><td>University of North Carolina at Chapel Hill</td><td>United States</td><td>35.91139710</td><td>-79.05045290</td><td>61%</td><td>82</td><td>50</td><td>32</td><td>6</td><td>28</td><td>52</td></tr><tr><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td><td>put_face</td><td>Put Face</td><td><a href="papers/ae0aee03d946efffdc7af2362a42d3750e7dd48a.html" target="_blank">The put face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>99</td><td>50</td><td>49</td><td>7</td><td>55</td><td>48</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html" target="_blank">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>78</td><td>49</td><td>29</td><td>6</td><td>54</td><td>23</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html" target="_blank">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>89%</td><td>54</td><td>48</td><td>5</td><td>1</td><td>41</td><td>12</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html" target="_blank">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>86</td><td>45</td><td>41</td><td>7</td><td>54</td><td>29</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html" target="_blank">The MUG facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>Greece</td><td>40.62984145</td><td>22.95889350</td><td>55%</td><td>82</td><td>45</td><td>37</td><td>4</td><td>34</td><td>47</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html" target="_blank">Pruning training sets for learning of object categories</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>63</td><td>44</td><td>19</td><td>4</td><td>42</td><td>20</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html" target="_blank">Clothing Co-parsing by 
Joint Image Segmentation and Labeling</a></td><td><span class="gray">[pdf]</span></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>60</td><td>43</td><td>17</td><td>0</td><td>34</td><td>28</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html" target="_blank">Kinship Verification through Transfer Learning</a></td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>71</td><td>41</td><td>30</td><td>2</td><td>29</td><td>42</td></tr><tr><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td><td>pa_100k</td><td>PA-100K</td><td><a href="papers/f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44.html" target="_blank">HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>55</td><td>41</td><td>14</td><td>0</td><td>36</td><td>17</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html" target="_blank">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>78</td><td>40</td><td>38</td><td>8</td><td>44</td><td>31</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html" target="_blank">Texas 3D Face Recognition Database</a></td><td><span class="gray">[pdf]</span></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>66</td><td>40</td><td>26</td><td>3</td><td>40</td><td>27</td></tr><tr><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td><td>feret</td><td>FERET</td><td><a href="papers/31de9b3dd6106ce6eec9a35991b2b9083395fd0b.html" target="_blank">FERET (Face Recognition Technology) Recognition Algorithm Development and Test Results</a></td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>75</td><td>39</td><td>36</td><td>5</td><td>54</td><td>20</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Delhi Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html" target="_blank">Automated Human Identification Using Ear Imaging</a></td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>80</td><td>39</td><td>41</td><td>6</td><td>35</td><td>44</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html" target="_blank">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><span 
class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>83</td><td>38</td><td>45</td><td>6</td><td>43</td><td>39</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html" target="_blank">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><span class="gray">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>65</td><td>38</td><td>27</td><td>6</td><td>45</td><td>20</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html" target="_blank">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>61</td><td>37</td><td>24</td><td>0</td><td>43</td><td>16</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html" target="_blank">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>67</td><td>36</td><td>31</td><td>4</td><td>29</td><td>28</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html" target="_blank">Re-identify people in wide area camera network</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Udine</td><td>Italy</td><td>46.08107230</td><td>13.21194740</td><td>60%</td><td>60</td><td>36</td><td>24</td><td>1</td><td>38</td><td>21</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html" target="_blank">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>52</td><td>35</td><td>17</td><td>1</td><td>46</td><td>6</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a 
href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html" target="_blank">Consistent Re-identification in a Camera Network</a></td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>49</td><td>35</td><td>14</td><td>3</td><td>34</td><td>13</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>8be57cdad86fdf8c8290df4ca3149592f3c46dd3</td><td>m2vts</td><td>m2vts</td><td><a href="papers/8be57cdad86fdf8c8290df4ca3149592f3c46dd3.html" target="_blank">The M2VTS Multimodal Face Database (Release 1.00)</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>73</td><td>33</td><td>40</td><td>2</td><td>39</td><td>33</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html" target="_blank">End-to-End Deep Learning for Person Search</a></td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>46</td><td>32</td><td>14</td><td>0</td><td>27</td><td>16</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html" target="_blank">UMDFaces: An annotated face dataset for training deep networks</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td>edu</td><td>University of Maryland</td><td>United States</td><td>39.28996850</td><td>-76.62196103</td><td>76%</td><td>42</td><td>32</td><td>10</td><td>2</td><td>30</td><td>11</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html" target="_blank">UMB-DB: A database of partially occluded 3D faces</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>47</td><td>31</td><td>16</td><td>2</td><td>22</td><td>24</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html" target="_blank">Recognize complex events from static images by fusing deep channels</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>44</td><td>31</td><td>13</td><td>1</td><td>29</td><td>15</td></tr><tr><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/18858cc936947fc96b5c06bbe3c6c2faa5614540.html" target="_blank">Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</a></td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>59</td><td>30</td><td>29</td><td>0</td><td>47</td><td>10</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html" target="_blank">The CMU Face In Action (FIA) Database</a></td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>54</td><td>29</td><td>25</td><td>5</td><td>40</td><td>16</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html" target="_blank">The intrinsic memorability of face photographs.</a></td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" 
target="_blank">[pdf]</a></td><td>Journal of experimental psychology. General</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>52</td><td>28</td><td>24</td><td>2</td><td>36</td><td>14</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html" target="_blank">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>50</td><td>26</td><td>24</td><td>5</td><td>31</td><td>18</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html" target="_blank">Level Playing Field for Million Scale Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>United States</td><td>47.65432380</td><td>-122.30800894</td><td>67%</td><td>39</td><td>26</td><td>13</td><td>2</td><td>29</td><td>9</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html" target="_blank">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>0</td><td>18</td><td>16</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html" target="_blank">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>2</td><td>21</td><td>12</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html" target="_blank">Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>47</td><td>24</td><td>23</td><td>1</td><td>23</td><td>24</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/758d7e1be64cc668c59ef33ba8882c8597406e53.html" target="_blank">AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</a></td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>37</td><td>23</td><td>14</td><td>0</td><td>25</td><td>11</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html" target="_blank">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on 
Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>2</td><td>26</td><td>15</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html" target="_blank">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>0</td><td>17</td><td>26</td></tr><tr><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/1bd1645a629f1b612960ab9bba276afd4cf7c666.html" target="_blank">End-to-End People Detection in Crowded Scenes</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Stanford University</td><td>United States</td><td>37.43131385</td><td>-122.16936535</td><td>55%</td><td>42</td><td>23</td><td>19</td><td>1</td><td>19</td><td>19</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html" target="_blank">Personalizing Human Video Pose Estimation</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Oxford University</td><td>United Kingdom</td><td>51.75208490</td><td>-1.25166460</td><td>64%</td><td>36</td><td>23</td><td>13</td><td>2</td><td>30</td><td>8</td></tr><tr><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/a0cc5f73a37723a6dd465924143f1cb4976d0169.html" target="_blank">Person Transfer GAN to Bridge Domain Gap for Person Re-identification</a></td><td><span class="gray">[pdf]</span></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>92%</td><td>24</td><td>22</td><td>2</td><td>1</td><td>20</td><td>4</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html" target="_blank">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><span class="gray">[pdf]</span></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td>edu</td><td>University of Notre Dame</td><td>United States</td><td>41.70456775</td><td>-86.23822026</td><td>63%</td><td>35</td><td>22</td><td>13</td><td>3</td><td>18</td><td>15</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>33</td><td>21</td><td>12</td><td>1</td><td>23</td><td>11</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>33</td><td>21</td><td>12</td><td>1</td><td>23</td><td>11</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html" target="_blank">IARPA Janus Benchmark-B Face Dataset</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Michigan State University</td><td>United States</td><td>42.71856800</td><td>-84.47791571</td><td>60%</td><td>35</td><td>21</td><td>14</td><td>3</td><td>25</td><td>8</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</span></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>CAFE</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html" target="_blank">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>37</td><td>20</td><td>17</td><td>3</td><td>30</td><td>7</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html" target="_blank">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</span></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>35</td><td>20</td><td>15</td><td>2</td><td>21</td><td>14</td></tr><tr><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/41976ebc8ab76d9a6861487c97cc7fcbe3b6015f.html" target="_blank">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>29</td><td>20</td><td>9</td><td>2</td><td>27</td><td>2</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</span></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html" target="_blank">Ear Recognition: More Than a Survey</a></td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" 
target="_blank">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>26</td><td>19</td><td>7</td><td>0</td><td>10</td><td>16</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html" target="_blank">Fashion Landmark Detection in the Wild</a></td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>26</td><td>19</td><td>7</td><td>1</td><td>16</td><td>10</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html" target="_blank">Describing Common Human Visual Actions in Images</a></td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>25</td><td>18</td><td>7</td><td>0</td><td>23</td><td>2</td></tr><tr><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/64e0690dd176a93de9d4328f6e31fc4afe1e7536.html" target="_blank">Tracking Multiple People Online and in Real Time</a></td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>23</td><td>18</td><td>5</td><td>1</td><td>12</td><td>10</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html" target="_blank">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>1</td><td>16</td><td>11</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html" target="_blank">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>2</td><td>21</td><td>6</td></tr><tr><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/d178cde92ab3dc0dd2ebee5a76a33d556c39448b.html" target="_blank">The jiku mobile video dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>National University of Singapore</td><td>Singapore</td><td>1.29620180</td><td>103.77689944</td><td>71%</td><td>24</td><td>17</td><td>7</td><td>0</td><td>6</td><td>19</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html" target="_blank">Genealogical face recognition based on UB KinFace database</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>United 
States</td><td>42.93362780</td><td>-78.88394479</td><td>55%</td><td>31</td><td>17</td><td>14</td><td>0</td><td>11</td><td>21</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html" target="_blank">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>0</td><td>16</td><td>10</td></tr><tr><td>d818568838433a6d6831adde49a58cef05e0c89f</td><td>agedb</td><td>AgeDB</td><td><a href="papers/d818568838433a6d6831adde49a58cef05e0c89f.html" target="_blank">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Imperial College London</td><td>United Kingdom</td><td>51.49887085</td><td>-0.17560797</td><td>89%</td><td>18</td><td>16</td><td>2</td><td>0</td><td>14</td><td>3</td></tr><tr><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td><td>umd_faces</td><td>UMD</td><td><a href="papers/71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6.html" target="_blank">The Do’s and Don’ts for CNN-Based Face Verification</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE International Conference on Computer Vision Workshops (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>26</td><td>16</td><td>10</td><td>2</td><td>16</td><td>8</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html" target="_blank">Three-dimensional face recognition: an eigensurface approach</a></td><td><span class="gray">[pdf]</span></td><td>2004 International Conference on Image Processing, 2004. 
ICIP '04.</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>38</td><td>16</td><td>22</td><td>4</td><td>24</td><td>13</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>erce</td><td>ERCe</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</span></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</span></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html" target="_blank">Learning Social Relation Traits from Face Images</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>23</td><td>14</td><td>9</td><td>4</td><td>16</td><td>7</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html" target="_blank">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>81%</td><td>16</td><td>13</td><td>3</td><td>0</td><td>13</td><td>4</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html" target="_blank">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>18</td><td>13</td><td>5</td><td>0</td><td>14</td><td>4</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html" target="_blank">Fine-grained evaluation on face detection in the wild</a></td><td><span class="gray">[pdf]</span></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>12</td><td>5</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html" target="_blank">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>1</td><td>12</td><td>6</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html" target="_blank">YawDD: a yawning detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>15</td><td>12</td><td>3</td><td>1</td><td>2</td><td>13</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html" target="_blank">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>22</td><td>11</td><td>11</td><td>3</td><td>11</td><td>10</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html" target="_blank">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>21</td><td>10</td><td>11</td><td>2</td><td>18</td><td>3</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html" target="_blank">Recognizing disguised faces</a></td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>18</td><td>10</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html" target="_blank">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>3</td><td>18</td><td>9</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html" target="_blank">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>United States</td><td>34.22498270</td><td>-77.86907744</td><td>77%</td><td>13</td><td>10</td><td>3</td><td>0</td><td>6</td><td>8</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html" target="_blank">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><span class="gray">[pdf]</a></td><td>2018 International Conference on Biometrics 
(ICB)</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>14</td><td>10</td><td>4</td><td>0</td><td>12</td><td>1</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html" target="_blank">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</span></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td>edu</td><td>BVBCET, Hubli, India</td><td>India</td><td>15.36883320</td><td>75.12137960</td><td>59%</td><td>17</td><td>10</td><td>7</td><td>0</td><td>11</td><td>5</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html" target="_blank">VADANA: A dense dataset for facial image analysis</a></td><td><span class="gray">[pdf]</span></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>University of Delaware</td><td>United States</td><td>39.68103280</td><td>-75.75401840</td><td>67%</td><td>15</td><td>10</td><td>5</td><td>0</td><td>5</td><td>10</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html" target="_blank">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>18</td><td>9</td><td>9</td><td>0</td><td>12</td><td>5</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html" target="_blank">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>20</td><td>9</td><td>11</td><td>0</td><td>9</td><td>11</td></tr><tr><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/a8d0b149c2eadaa02204d3e4356fbc8eccf3b315.html" target="_blank">Hi4D-ADSIP 3-D dynamic facial articulation database</a></td><td><span class="gray">[pdf]</span></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>1</td><td>4</td><td>11</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html" target="_blank">Iranian Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</span></td><td>2007 International Conference on Machine Vision</td><td>edu</td><td>Islamic Azad University</td><td>Iran</td><td>34.84529990</td><td>48.55962120</td><td>39%</td><td>23</td><td>9</td><td>14</td><td>2</td><td>14</td><td>9</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html" target="_blank">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE International Conference on Systems, Man, and 
Cybernetics</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>19</td><td>9</td><td>10</td><td>2</td><td>6</td><td>13</td></tr><tr><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td><td>caltech_crp</td><td>Caltech CRP</td><td><a href="papers/060820f110a72cbf02c14a6d1085bd6e1d994f6a.html" target="_blank">Fine-grained classification of pedestrians in video: Benchmark and state of the art</a></td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>17</td><td>8</td><td>9</td><td>0</td><td>9</td><td>8</td></tr><tr><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2b926b3586399d028b46315d7d9fb9d879e4f79c.html" target="_blank">Multimodal 2D, 2.5D & 3D Face Verification</a></td><td><span class="gray">[pdf]</span></td><td>2006 International Conference on Image Processing</td><td>edu</td><td>Universidad Rey Juan Carlos, Spain</td><td>Spain</td><td>40.33586610</td><td>-3.87694320</td><td>57%</td><td>14</td><td>8</td><td>6</td><td>0</td><td>2</td><td>12</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html" target="_blank">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>16</td><td>8</td><td>8</td><td>1</td><td>10</td><td>6</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html" target="_blank">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><span class="gray">[pdf]</span></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>Canada</td><td>45.50397610</td><td>-73.57496870</td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>13</td><td>7</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html" target="_blank">How to Take a Good Selfie?</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>11</td><td>8</td><td>3</td><td>0</td><td>7</td><td>5</td></tr><tr><td>633c851ebf625ad7abdda2324e9de093cf623141</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/633c851ebf625ad7abdda2324e9de093cf623141.html" target="_blank">Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</a></td><td><span class="gray">[pdf]</span></td><td>2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>10</td><td>7</td><td>3</td><td>0</td><td>8</td><td>3</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html" target="_blank">ChaLearn looking at people: A review of events and resources</a></td><td><span class="gray">[pdf]</span></td><td>2017 International Joint Conference on Neural Networks 
(IJCNN)</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>13</td><td>7</td><td>6</td><td>1</td><td>8</td><td>4</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html" target="_blank">Competitive affective gaming: winning with a smile</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td>edu</td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>Portugal</td><td>38.66096400</td><td>-9.20581300</td><td>78%</td><td>9</td><td>7</td><td>2</td><td>0</td><td>5</td><td>4</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>ufi</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html" target="_blank">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>12</td><td>6</td><td>6</td><td>0</td><td>4</td><td>6</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html" target="_blank">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>11</td><td>6</td><td>5</td><td>0</td><td>5</td><td>4</td></tr><tr><td>4af89578ac237278be310f7660a408b03f12d603</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/4af89578ac237278be310f7660a408b03f12d603.html" target="_blank">Large-scale geo-facial image analysis</a></td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>6</td><td>6</td><td>0</td><td>0</td><td>4</td><td>2</td></tr><tr><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/2d45cfd838016a6e39f6b766ffe85acd649440c7.html" target="_blank">Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</a></td><td><span class="gray">[pdf]</span></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>8</td><td>6</td><td>2</td><td>1</td><td>5</td><td>3</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html" target="_blank">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><span class="gray">[pdf]</span></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>86%</td><td>7</td><td>6</td><td>1</td><td>1</td><td>5</td><td>2</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html" target="_blank">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>14</td><td>5</td><td>9</td><td>0</td><td>12</td><td>1</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html" target="_blank">Large scale unconstrained open set face database</a></td><td><span class="gray">[pdf]</span></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>company</td><td>Securics Inc., Colorado Springs, CO</td><td>United States</td><td>38.83388160</td><td>-104.82136340</td><td>83%</td><td>6</td><td>5</td><td>1</td><td>0</td><td>4</td><td>2</td></tr><tr><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td><td>uccs</td><td>UCCS</td><td><a href="papers/d4f1eb008eb80595bcfdac368e23ae9754e1e745.html" target="_blank">Unconstrained Face Detection and Open-Set Face Recognition Challenge</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>4</td><td>1</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html" target="_blank">USED: a large-scale social event detection dataset</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td>edu</td><td>University of Trento</td><td>Italy</td><td>46.06588360</td><td>11.11598940</td><td>71%</td><td>7</td><td>5</td><td>2</td><td>0</td><td>3</td><td>4</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html" target="_blank">Re-identification of pedestrians with variable occlusion and scale</a></td><td><span class="gray">[pdf]</span></td><td>2011 IEEE International Conference on Computer Vision Workshops 
(ICCV Workshops)</td><td>edu</td><td>Kingston University</td><td>United Kingdom</td><td>51.42930860</td><td>-0.26840440</td><td>56%</td><td>9</td><td>5</td><td>4</td><td>1</td><td>5</td><td>4</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html" target="_blank">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>6</td><td>3</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html" target="_blank">A Multi-modal Graphical Model for Scene Analysis</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>0</td><td>5</td><td>3</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>erce</td><td>ERCe</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html" target="_blank">Visual Kinship Recognition of Families in the Wild</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>University of Massachusetts Dartmouth</td><td>United States</td><td>41.62772475</td><td>-71.00724501</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>2</td><td>3</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html" target="_blank">Is the eye region more reliable than the face? 
A preliminary study of face-based recognition on a transgender dataset</a></td><td><span class="gray">[pdf]</span></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>1</td><td>3</td><td>5</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html" target="_blank">Large age-gap face verification by feature injection in deep networks</a></td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>3</td><td>4</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html" target="_blank">Spoofing faces using makeup: An investigative study</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>France</td><td>43.61581310</td><td>7.06838000</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>0</td><td>1</td><td>5</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html" target="_blank">PETS 2017: Dataset and Challenge</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>1</td><td>8</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html" target="_blank">ReSEED: social event dEtection dataset</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>1</td><td>5</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html" target="_blank">Faces in Places: compound query retrieval</a></td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>3</td><td>2</td></tr><tr><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td><td>mafa</td><td>MAsked FAces</td><td><a href="papers/9cc8cf0c7d7fa7607659921b6ff657e17e135ecc.html" target="_blank">Detecting Masked Faces in the Wild with LLE-CNNs</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition 
(CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>5</td><td>3</td><td>2</td><td>1</td><td>4</td><td>1</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td><td>mr2</td><td>MR2</td><td><a href="papers/578d4ad74818086bb64f182f72e2c8bd31e3d426.html" target="_blank">The MR2: A multi-racial, mega-resolution database of facial stimuli.</a></td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>7</td><td>0</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html" target="_blank">Investigating Open-World Person Re-identification Using a Drone</a></td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>5</td><td>2</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html" target="_blank">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td><td>imdb_face</td><td>IMDb Face</td><td><a href="papers/9e31e77f9543ab42474ba4e9330676e18c242e72.html" target="_blank">The Devil of Face Recognition is in the Noise</a></td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Nanyang Technological University</td><td>Singapore</td><td>1.34841040</td><td>103.68297965</td><td>33%</td><td>6</td><td>2</td><td>4</td><td>0</td><td>4</td><td>1</td></tr><tr><td>4eab317b5ac436a949849ed286baa3de2a541eef</td><td>laofiw</td><td>LAOFIW</td><td><a href="papers/4eab317b5ac436a949849ed286baa3de2a541eef.html" target="_blank">Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</a></td><td><a href="https://arxiv.org/pdf/1809.02169.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html" target="_blank">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>3531332efe19be21e7401ba1f04570a142617236</td><td>ufdd</td><td>UFDD</td><td><a href="papers/3531332efe19be21e7401ba1f04570a142617236.html" target="_blank">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>1</td><td>4</td><td>0</td></tr><tr><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461.html" target="_blank">A 3D Dynamic Database for Unconstrained Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43.html" target="_blank">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html" target="_blank">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td><td>gfw</td><td>Grouping Face in the Wild</td><td><a href="papers/e58dd160a76349d46f881bd6ddbc2921f08d1050.html" target="_blank">Merge or Not? 
Learning to Group Faces via Imitation Learning</a></td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html" target="_blank">Indian Face Age Database: A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td><td>megaage</td><td>MegaAge</td><td><a href="papers/c72a2ea819df9b0e8cd267eebcc6528b8741e03d.html" target="_blank">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html" target="_blank">Crowdsourcing facial expressions for affective-interaction</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/2306b2a8fba28539306052764a77a0d0f5d1236a.html" target="_blank">Surveillance Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html" target="_blank">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td>WLFDB</td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html" target="_blank">WLFDB : Weakly Labeled Face Databases</a></td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/7b92d1e53cc87f7a4256695de590098a2f30261e.html" target="_blank">From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops 
(CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html" target="_blank">Pedestrian detection: A benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/15e1af79939dbf90790b03d8aa02477783fb1d0f.html" target="_blank">Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</a></td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>72a155c987816ae81c858fddbd6beab656d86220</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/72a155c987816ae81c858fddbd6beab656d86220.html" target="_blank">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html" target="_blank">Face swapping: automatically replacing faces in photographs</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/12ad3b5bbbf407f8e54ea692c07633d1a867c566.html" target="_blank">Object recognition using segmentation for feature detection</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 17th International Conference on Pattern Recognition, 2004. ICPR 2004.</td><td>edu</td><td>Inst. of Comput. Sci., Univ. 
of Leoben, Austria</td><td>Austria</td><td>47.38473720</td><td>15.09302010</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html" target="_blank">HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</a></td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td><td>ibm_dif</td><td>IBM Diversity in Faces</td><td><a href="papers/0ab7cff2ccda7269b73ff6efd9d37e1318f7db25.html" target="_blank">Facial Coding Scheme Reference 1 Craniofacial Distances</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html" target="_blank">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html" target="_blank">Understanding images of groups of people</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfpw</td><td>LFPW</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html" target="_blank">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html" target="_blank">Component-Based Face Recognition with 3D Morphable Models</a></td><td><span class="gray">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html" target="_blank">Names and faces in the news</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td><td>scut_head</td><td>SCUT HEAD</td><td><a href="papers/d3200d49a19a4a4e4e9745ee39649b65d80c834b.html" target="_blank">Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</a></td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td>2018 24th International Conference on Pattern Recognition (ICPR)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>8990cdce3f917dad622e43e033db686b354d057c</td><td>tiny_faces</td><td>TinyFace</td><td><a href="papers/8990cdce3f917dad622e43e033db686b354d057c.html" target="_blank">Low-Resolution Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-MotionPairs</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html" target="_blank">VQA: Visual Question Answering</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html" target="_blank">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Kentucky</td><td>United States</td><td>38.03337420</td><td>-84.50177580</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/36bccfb2ad847096bc76777e544f305813cd8f5b.html" target="_blank">WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr></table></body></html>
\ No newline at end of file
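A note on the Coverage report that the rest of this diff adds: its Coverage column can be reproduced from the citation-count columns beside it. From the visible rows, coverage appears to be geocoded citations as a share of total citations, rounded to a whole percent, with zero-citation rows displayed as 100%. A minimal Python sketch of that inferred relationship follows; the function name is illustrative and this is not the scraper's actual generator code:

```python
# Sketch: the Coverage column as inferred from rows in these reports.
# Assumption: coverage = geocoded / total citations, rounded to a
# whole percent; rows with no citations are displayed as 100%.

def coverage_percent(total_citations: int, geocoded_citations: int) -> int:
    if total_citations == 0:
        return 100  # zero-citation rows show 100% in the report
    return round(100 * geocoded_citations / total_citations)

# Spot checks against rows visible in these reports:
assert coverage_percent(999, 656) == 66   # AFW
assert coverage_percent(914, 578) == 63   # PubFig
assert coverage_percent(2, 0) == 0        # EuroCity Persons
assert coverage_percent(0, 0) == 100      # rows with no citations
```

The zero-citation convention matters when reading the tables: a row can report 100% coverage while having zero total citations, which signals "nothing to geocode" rather than complete geocoding.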
+<!doctype html><html><head><meta charset='utf-8'><title>Coverage</title><link rel='stylesheet' href='reports.css'></head><body><h2>Coverage</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Country</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html" target="_blank">Face detection, pose estimation, and landmark localization in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>999</td><td>656</td><td>343</td><td>43</td><td>576</td><td>422</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html" target="_blank">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>999</td><td>642</td><td>357</td><td>56</td><td>628</td><td>362</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html" target="_blank">Deep Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>999</td><td>640</td><td>359</td><td>49</td><td>558</td><td>429</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html" target="_blank">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>999</td><td>622</td><td>377</td><td>61</td><td>598</td><td>382</td></tr><tr><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td><td>coco</td><td>COCO</td><td><a href="papers/5e0f8c355a37a5a89351c02f174e7a5ddcb98683.html" target="_blank">Microsoft COCO: Common Objects in Context</a></td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>606</td><td>393</td><td>25</td><td>722</td><td>259</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html" target="_blank">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td>edu</td><td>University of Pittsburgh</td><td>United 
States</td><td>40.44415295</td><td>-79.96243993</td><td>60%</td><td>999</td><td>604</td><td>395</td><td>58</td><td>470</td><td>518</td></tr><tr><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td><td>kitti</td><td>KITTI</td><td><a href="papers/026e3363b7f76b51cc711886597a44d5f1fd1de2.html" target="_blank">Vision meets robotics: The KITTI dataset</a></td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td>I. J. Robotics Res.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>999</td><td>602</td><td>397</td><td>36</td><td>553</td><td>462</td></tr><tr><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td><td>voc</td><td>VOC</td><td><a href="papers/0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a.html" target="_blank">The Pascal Visual Object Classes (VOC) Challenge</a></td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>60%</td><td>999</td><td>598</td><td>400</td><td>29</td><td>557</td><td>422</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html" target="_blank">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>60%</td><td>999</td><td>596</td><td>403</td><td>70</td><td>526</td><td>466</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html" target="_blank">Attribute and simile classifiers for face verification</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>914</td><td>578</td><td>336</td><td>47</td><td>586</td><td>316</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html" target="_blank">80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>999</td><td>574</td><td>425</td><td>89</td><td>644</td><td>337</td></tr><tr><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/6d96f946aaabc734af7fe3fc4454cf8547fcd5ed.html" target="_blank">The AR face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>999</td><td>573</td><td>426</td><td>59</td><td>458</td><td>530</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html" target="_blank">Histograms of oriented gradients for human detection</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>INRIA 
Rhone-Alps, Montbonnot, France</td><td>France</td><td>45.21788600</td><td>5.80736900</td><td>57%</td><td>999</td><td>570</td><td>429</td><td>43</td><td>419</td><td>509</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html" target="_blank">Overview of the face recognition grand challenge</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>NIST</td><td>United States</td><td>39.14004000</td><td>-77.21850600</td><td>57%</td><td>999</td><td>566</td><td>432</td><td>86</td><td>549</td><td>442</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html" target="_blank">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>999</td><td>560</td><td>439</td><td>67</td><td>498</td><td>462</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html" target="_blank">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>554</td><td>445</td><td>94</td><td>495</td><td>491</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html" target="_blank">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>552</td><td>447</td><td>70</td><td>540</td><td>439</td></tr><tr><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/b62628ac06bbac998a3ab825324a41a11bc3a988.html" target="_blank">XM2VTSDB : The extended M2VTS database</a></td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>864</td><td>527</td><td>337</td><td>39</td><td>493</td><td>404</td></tr><tr><td>dc8b25e35a3acb812beb499844734081722319b4</td><td>feret</td><td>FERET</td><td><a href="papers/dc8b25e35a3acb812beb499844734081722319b4.html" target="_blank">The FERET database and evaluation procedure for face-recognition algorithms</a></td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>520</td><td>479</td><td>103</td><td>591</td><td>421</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html" target="_blank">Deep Learning Face Attributes in the 
Wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>56%</td><td>919</td><td>519</td><td>399</td><td>61</td><td>694</td><td>201</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html" target="_blank">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>899</td><td>508</td><td>391</td><td>51</td><td>431</td><td>451</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html" target="_blank">Parameterisation of a stochastic model for human face identification</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>999</td><td>500</td><td>499</td><td>94</td><td>543</td><td>427</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>448</td><td>311</td><td>50</td><td>404</td><td>345</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>448</td><td>311</td><td>50</td><td>404</td><td>345</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html" target="_blank">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of California, Santa Cruz</td><td>United States</td><td>36.99158470</td><td>-122.05827710</td><td>67%</td><td>624</td><td>415</td><td>209</td><td>33</td><td>342</td><td>276</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html" target="_blank">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>568</td><td>412</td><td>156</td><td>19</td><td>320</td><td>235</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html" target="_blank">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th 
International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>716</td><td>409</td><td>307</td><td>60</td><td>492</td><td>222</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html" target="_blank">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>632</td><td>366</td><td>264</td><td>44</td><td>358</td><td>264</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html" target="_blank">Scalable Person Re-identification: A Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>77%</td><td>460</td><td>355</td><td>105</td><td>9</td><td>263</td><td>185</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html" target="_blank">Face recognition in unconstrained videos with matched background similarity</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Tel Aviv University</td><td>Israel</td><td>32.11198890</td><td>34.80459702</td><td>67%</td><td>509</td><td>340</td><td>168</td><td>24</td><td>294</td><td>216</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html" target="_blank">Learning Face Representation from Scratch</a></td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese Academy of Sciences</td><td>China</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>476</td><td>337</td><td>139</td><td>19</td><td>290</td><td>182</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on 
Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html" target="_blank">A 3D facial expression database for facial behavior research</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>588</td><td>313</td><td>274</td><td>45</td><td>306</td><td>282</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html" target="_blank">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>511</td><td>283</td><td>228</td><td>50</td><td>329</td><td>182</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html" target="_blank">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>386</td><td>263</td><td>123</td><td>23</td><td>204</td><td>180</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>407</td><td>262</td><td>145</td><td>18</td><td>252</td><td>153</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>407</td><td>262</td><td>145</td><td>18</td><td>252</td><td>153</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html" target="_blank">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>429</td><td>254</td><td>175</td><td>38</td><td>198</td><td>234</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition 
(FGR06)</td><td>edu</td><td>North Carolina University</td><td>United States</td><td>34.22398690</td><td>-77.87013250</td><td>57%</td><td>437</td><td>251</td><td>185</td><td>22</td><td>228</td><td>203</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>North Carolina University</td><td>United States</td><td>34.22398690</td><td>-77.87013250</td><td>57%</td><td>437</td><td>251</td><td>185</td><td>22</td><td>228</td><td>203</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html" target="_blank">Web-based database for facial expression analysis</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>464</td><td>250</td><td>214</td><td>45</td><td>282</td><td>188</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html" target="_blank">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>387</td><td>249</td><td>138</td><td>21</td><td>291</td><td>96</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html" target="_blank">FDDB: A benchmark for face detection in unconstrained settings</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>380</td><td>240</td><td>140</td><td>19</td><td>202</td><td>164</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html" target="_blank">Presentation and validation of the Radboud Faces Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>487</td><td>234</td><td>253</td><td>39</td><td>342</td><td>144</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>369</td><td>225</td><td>143</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>369</td><td>225</td><td>143</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>369</td><td>225</td><td>143</td><td>32</td><td>237</td><td>131</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>oxford_town_centre</td><td>TownCentre</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html" target="_blank">Stable multi-target tracking in real-time surveillance video</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Oxford</td><td>United Kingdom</td><td>51.75345380</td><td>-1.25400997</td><td>67%</td><td>328</td><td>221</td><td>107</td><td>13</td><td>186</td><td>140</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html" target="_blank">Interactive Facial Feature Localization</a></td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Adobe</td><td>United States</td><td>37.33077030</td><td>-121.89409510</td><td>62%</td><td>352</td><td>219</td><td>133</td><td>23</td><td>212</td><td>146</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html" target="_blank">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>323</td><td>217</td><td>106</td><td>25</td><td>208</td><td>120</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html" target="_blank">Depth and Appearance for Mobile Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>62%</td><td>324</td><td>202</td><td>122</td><td>26</td><td>193</td><td>127</td></tr><tr><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td><td>mot</td><td>MOT</td><td><a href="papers/5981e6479c3fd4e31644db35d236bfb84ae46514.html" target="_blank">Learning to associate: HybridBoosted multi-target tracker for crowded scene</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of Southern California</td><td>United States</td><td>34.02241490</td><td>-118.28634407</td><td>61%</td><td>326</td><td>200</td><td>125</td><td>22</td><td>190</td><td>137</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html" target="_blank">Robust Face Landmark Estimation under Occlusion</a></td><td><span class="gray">[pdf]</a></td><td>2013 
IEEE International Conference on Computer Vision</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>61%</td><td>325</td><td>197</td><td>128</td><td>17</td><td>194</td><td>133</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html" target="_blank">Bosphorus Database for 3D Face Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>352</td><td>196</td><td>156</td><td>17</td><td>162</td><td>188</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html" target="_blank">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>61%</td><td>318</td><td>194</td><td>124</td><td>33</td><td>211</td><td>107</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html" target="_blank">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>280</td><td>194</td><td>86</td><td>9</td><td>139</td><td>137</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html" target="_blank">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>343</td><td>190</td><td>153</td><td>25</td><td>223</td><td>114</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>59%</td><td>311</td><td>185</td><td>126</td><td>34</td><td>208</td><td>105</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>59%</td><td>311</td><td>185</td><td>126</td><td>34</td><td>208</td><td>105</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a 
href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>285</td><td>182</td><td>103</td><td>13</td><td>197</td><td>93</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>285</td><td>182</td><td>103</td><td>13</td><td>197</td><td>93</td></tr><tr><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/010f0f4929e6a6644fb01f0e43820f91d0fad292.html" target="_blank">YFCC100M: the new data in multimedia research</a></td><td><span class="gray">[pdf]</a></td><td>Commun. ACM</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>64%</td><td>274</td><td>175</td><td>99</td><td>23</td><td>172</td><td>100</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html" target="_blank">Locally Aligned Feature Transforms across Views</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>258</td><td>164</td><td>94</td><td>15</td><td>136</td><td>117</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html" target="_blank">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Brown University</td><td>United States</td><td>41.82686820</td><td>-71.40123146</td><td>60%</td><td>264</td><td>158</td><td>106</td><td>27</td><td>206</td><td>56</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html" target="_blank">Generic object recognition with boosting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>53%</td><td>293</td><td>155</td><td>138</td><td>16</td><td>195</td><td>97</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html" target="_blank">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>237</td><td>152</td><td>85</td><td>15</td><td>159</td><td>76</td></tr><tr><td>6204776d31359d129a582057c2d788a14f8aadeb</td><td>youtube_celebrities</td><td>YouTube 
Celebrities</td><td><a href="papers/6204776d31359d129a582057c2d788a14f8aadeb.html" target="_blank">Face tracking and recognition with visual constraints in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Rutgers University</td><td>United States</td><td>40.47913175</td><td>-74.43168868</td><td>56%</td><td>267</td><td>150</td><td>116</td><td>12</td><td>125</td><td>121</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html" target="_blank">Age and Gender Estimation of Unfiltered Faces</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>80%</td><td>179</td><td>143</td><td>36</td><td>1</td><td>98</td><td>80</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html" target="_blank">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Microsoft</td><td>United 
States</td><td>47.64233180</td><td>-122.13693020</td><td>78%</td><td>180</td><td>141</td><td>39</td><td>8</td><td>120</td><td>59</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html" target="_blank">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>225</td><td>140</td><td>85</td><td>17</td><td>146</td><td>77</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html" target="_blank">Recognition using visual phrases</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United States</td><td>40.11116745</td><td>-88.22587665</td><td>57%</td><td>246</td><td>140</td><td>106</td><td>18</td><td>170</td><td>68</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>bpad</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html" target="_blank">Describing people: A poselet-based approach to attribute classification</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>230</td><td>138</td><td>92</td><td>14</td><td>163</td><td>66</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html" target="_blank">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>197</td><td>133</td><td>64</td><td>15</td><td>108</td><td>88</td></tr><tr><td>0c91808994a250d7be332400a534a9291ca3b60e</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/0c91808994a250d7be332400a534a9291ca3b60e.html" target="_blank">Weak Hypotheses and Boosting for Generic Object Detection and Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>236</td><td>131</td><td>105</td><td>17</td><td>161</td><td>77</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html" target="_blank">Exploring Models and Data for Image Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>206</td><td>126</td><td>80</td><td>11</td><td>162</td><td>39</td></tr><tr><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/570f37ed63142312e6ccdf00ecc376341ec72b9f.html" target="_blank">Social LSTM: Human Trajectory Prediction in Crowded Spaces</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition 
(CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>224</td><td>126</td><td>98</td><td>3</td><td>140</td><td>81</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html" target="_blank">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>184</td><td>118</td><td>66</td><td>12</td><td>120</td><td>67</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html" target="_blank">Learning effective human pose estimation from inaccurate annotation</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Leeds</td><td>United Kingdom</td><td>53.80387185</td><td>-1.55245712</td><td>69%</td><td>169</td><td>117</td><td>52</td><td>8</td><td>108</td><td>65</td></tr><tr><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/22ad2c8c0f4d6aa4328b38d894b814ec22579761.html" target="_blank">Clothing cosegmentation for recognizing people</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>65%</td><td>178</td><td>116</td><td>62</td><td>7</td><td>100</td><td>86</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html" target="_blank">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><span class="gray">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>Australia</td><td>-35.27769990</td><td>149.11852700</td><td>64%</td><td>181</td><td>115</td><td>66</td><td>8</td><td>87</td><td>97</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html" target="_blank">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>168</td><td>115</td><td>53</td><td>4</td><td>97</td><td>69</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html" target="_blank">WIDER FACE: A Face Detection Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>65%</td><td>178</td><td>115</td><td>63</td><td>12</td><td>112</td><td>66</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html" target="_blank">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference 
on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>176</td><td>112</td><td>64</td><td>2</td><td>113</td><td>62</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw</td><td>LFW</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html" target="_blank">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>183</td><td>109</td><td>74</td><td>13</td><td>103</td><td>77</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html" target="_blank">Appearance-based gaze estimation in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>149</td><td>107</td><td>42</td><td>3</td><td>94</td><td>54</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html" target="_blank">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>139</td><td>106</td><td>33</td><td>5</td><td>100</td><td>37</td></tr><tr><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td><td>scface</td><td>SCface</td><td><a href="papers/29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d.html" target="_blank">SCface – surveillance cameras face database</a></td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td>Multimedia Tools and Applications</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>179</td><td>103</td><td>76</td><td>15</td><td>88</td><td>89</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html" target="_blank">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><span class="gray">[pdf]</a></td><td>Face and Gesture 2011</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>54%</td><td>189</td><td>102</td><td>87</td><td>22</td><td>108</td><td>78</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html" target="_blank">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>69%</td><td>145</td><td>100</td><td>45</td><td>9</td><td>93</td><td>51</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html" target="_blank">Multi-source Multi-scale Counting in Extremely Dense Crowd 
Images</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>148</td><td>100</td><td>48</td><td>5</td><td>80</td><td>65</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html" target="_blank">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>148</td><td>99</td><td>49</td><td>7</td><td>105</td><td>43</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html" target="_blank">Multi-camera activity correlation analysis</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>69%</td><td>142</td><td>98</td><td>44</td><td>7</td><td>77</td><td>64</td></tr><tr><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td><td>disfa</td><td>DISFA</td><td><a href="papers/5a5f0287484f0d480fed1ce585dbf729586f0edc.html" target="_blank">DISFA: A Spontaneous Facial Action Intensity Database</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Affective Computing</td><td>edu</td><td>University of Denver</td><td>United States</td><td>39.67665410</td><td>-104.96220300</td><td>52%</td><td>184</td><td>95</td><td>89</td><td>19</td><td>96</td><td>89</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html" target="_blank">On a Large Sequence-Based Human Gait Database</a></td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>150</td><td>95</td><td>55</td><td>17</td><td>103</td><td>51</td></tr><tr><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td><td>fei</td><td>FEI</td><td><a href="papers/8b56e33f33e582f3e473dba573a16b598ed9bcdc.html" target="_blank">A new ranking method for principal components analysis and its application to face image analysis</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>169</td><td>93</td><td>76</td><td>6</td><td>69</td><td>102</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html" target="_blank">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>122</td><td>92</td><td>30</td><td>4</td><td>75</td><td>48</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html" target="_blank">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine 
Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>168</td><td>90</td><td>78</td><td>10</td><td>85</td><td>79</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html" target="_blank">A data-driven approach to cleaning large face datasets</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>138</td><td>89</td><td>49</td><td>1</td><td>95</td><td>41</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html" target="_blank">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>138</td><td>86</td><td>52</td><td>6</td><td>76</td><td>63</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html" target="_blank">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Massachusetts</td><td>United States</td><td>42.38897850</td><td>-72.52869870</td><td>68%</td><td>123</td><td>84</td><td>39</td><td>3</td><td>71</td><td>51</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html" target="_blank">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>133</td><td>82</td><td>51</td><td>9</td><td>73</td><td>58</td></tr><tr><td>5a4df9bef1872865f0b619ac3aacc97f49e4a035</td><td>cuhk_train_station</td><td>CUHK Train Station Dataset</td><td><a href="papers/5a4df9bef1872865f0b619ac3aacc97f49e4a035.html" target="_blank">Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>58%</td><td>141</td><td>82</td><td>59</td><td>5</td><td>60</td><td>75</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html" target="_blank">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>129</td><td>77</td><td>52</td><td>9</td><td>74</td><td>55</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontaneous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html" target="_blank">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on 
Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>United States</td><td>42.08779975</td><td>-75.97066066</td><td>49%</td><td>154</td><td>76</td><td>78</td><td>7</td><td>80</td><td>75</td></tr><tr><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/7f23a4bb0c777dd72cca7665a5f370ac7980217e.html" target="_blank">Improving Person Re-identification by Attribute and Identity Learning</a></td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>84%</td><td>87</td><td>73</td><td>14</td><td>0</td><td>43</td><td>42</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html" target="_blank">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>116</td><td>70</td><td>46</td><td>14</td><td>84</td><td>31</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html" target="_blank">Labeled Faces in the Wild: A Survey</a></td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Stevens Institute of Technology</td><td>United States</td><td>40.74225200</td><td>-74.02709490</td><td>62%</td><td>109</td><td>68</td><td>41</td><td>8</td><td>66</td><td>43</td></tr><tr><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/70c59dc3470ae867016f6ab0e008ac8ba03774a1.html" target="_blank">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>83</td><td>66</td><td>17</td><td>3</td><td>61</td><td>20</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html" target="_blank">Pedestrian Attribute Recognition At Far Distance</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>88</td><td>65</td><td>23</td><td>1</td><td>50</td><td>36</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html" target="_blank">Object Detection Combining Recognition and Segmentation</a></td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>105</td><td>64</td><td>41</td><td>9</td><td>58</td><td>43</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis 
and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>108</td><td>63</td><td>45</td><td>11</td><td>66</td><td>44</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>108</td><td>63</td><td>45</td><td>11</td><td>66</td><td>44</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html" target="_blank">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>95</td><td>63</td><td>32</td><td>6</td><td>50</td><td>45</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html" target="_blank">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>95</td><td>61</td><td>34</td><td>4</td><td>59</td><td>35</td></tr><tr><td>06f02199690961ba52997cde1527e714d2b3bf8f</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/06f02199690961ba52997cde1527e714d2b3bf8f.html" target="_blank">Gaze locking: passive eye contact detection for human-object interaction</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>76%</td><td>79</td><td>60</td><td>19</td><td>0</td><td>49</td><td>34</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html" target="_blank">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>99</td><td>59</td><td>40</td><td>1</td><td>73</td><td>21</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html" target="_blank">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>City University of New York</td><td>United States</td><td>40.87228250</td><td>-73.89489171</td><td>51%</td><td>115</td><td>59</td><td>56</td><td>8</td><td>75</td><td>37</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html" target="_blank">Understanding Kin Relationships in a Photo</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on 
Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>94</td><td>59</td><td>35</td><td>1</td><td>33</td><td>61</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html" target="_blank">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>91</td><td>57</td><td>34</td><td>5</td><td>60</td><td>31</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html" target="_blank">Violent flows: Real-time detection of violent crowd behavior</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>65%</td><td>88</td><td>57</td><td>31</td><td>6</td><td>45</td><td>44</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html" target="_blank">High Five: Recognising human interactions in TV shows</a></td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>98</td><td>56</td><td>42</td><td>10</td><td>66</td><td>28</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html" target="_blank">Automatic 3D face authentication</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>100</td><td>54</td><td>46</td><td>8</td><td>63</td><td>36</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html" target="_blank">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>63%</td><td>84</td><td>53</td><td>31</td><td>4</td><td>51</td><td>33</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html" target="_blank">Person Re-identification in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>77</td><td>52</td><td>25</td><td>1</td><td>47</td><td>27</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html" target="_blank">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and 
Cybernetics: Systems</td><td>edu</td><td>University of North Carolina at Chapel Hill</td><td>United States</td><td>35.91139710</td><td>-79.05045290</td><td>62%</td><td>82</td><td>51</td><td>31</td><td>6</td><td>28</td><td>52</td></tr><tr><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td><td>put_face</td><td>PUT Face</td><td><a href="papers/ae0aee03d946efffdc7af2362a42d3750e7dd48a.html" target="_blank">The put face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>99</td><td>50</td><td>49</td><td>7</td><td>55</td><td>48</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html" target="_blank">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>91%</td><td>54</td><td>49</td><td>4</td><td>1</td><td>41</td><td>12</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html" target="_blank">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>78</td><td>49</td><td>29</td><td>6</td><td>54</td><td>23</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html" target="_blank">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>86</td><td>45</td><td>41</td><td>7</td><td>54</td><td>29</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html" target="_blank">The MUG facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>Greece</td><td>40.62984145</td><td>22.95889350</td><td>55%</td><td>82</td><td>45</td><td>37</td><td>4</td><td>34</td><td>47</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html" target="_blank">Pruning training sets for learning of object categories</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>63</td><td>44</td><td>19</td><td>4</td><td>42</td><td>20</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html" target="_blank">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><span 
class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>60</td><td>42</td><td>18</td><td>0</td><td>34</td><td>28</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html" target="_blank">Kinship Verification through Transfer Learning</a></td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>71</td><td>41</td><td>30</td><td>2</td><td>29</td><td>42</td></tr><tr><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td><td>pa_100k</td><td>PA-100K</td><td><a href="papers/f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44.html" target="_blank">HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>55</td><td>41</td><td>14</td><td>0</td><td>36</td><td>17</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html" target="_blank">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>78</td><td>40</td><td>38</td><td>8</td><td>44</td><td>31</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html" target="_blank">Texas 3D Face Recognition Database</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>66</td><td>40</td><td>26</td><td>3</td><td>40</td><td>27</td></tr><tr><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td><td>feret</td><td>FERET</td><td><a href="papers/31de9b3dd6106ce6eec9a35991b2b9083395fd0b.html" target="_blank">FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</a></td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>75</td><td>39</td><td>36</td><td>5</td><td>54</td><td>20</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html" target="_blank">Automated Human Identification Using Ear Imaging</a></td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>80</td><td>39</td><td>41</td><td>6</td><td>35</td><td>44</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html" target="_blank">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern 
Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>83</td><td>38</td><td>45</td><td>6</td><td>43</td><td>39</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html" target="_blank">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><span class="gray">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>65</td><td>38</td><td>27</td><td>6</td><td>45</td><td>20</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html" target="_blank">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>61</td><td>37</td><td>24</td><td>0</td><td>43</td><td>16</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html" target="_blank">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>67</td><td>36</td><td>31</td><td>4</td><td>29</td><td>28</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html" target="_blank">Re-identify people in wide area camera network</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Udine</td><td>Italy</td><td>46.08107230</td><td>13.21194740</td><td>60%</td><td>60</td><td>36</td><td>24</td><td>1</td><td>38</td><td>21</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html" target="_blank">Consistent Re-identification in a Camera Network</a></td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>49</td><td>35</td><td>14</td><td>3</td><td>34</td><td>13</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial 
makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html" target="_blank">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>52</td><td>34</td><td>18</td><td>1</td><td>46</td><td>6</td></tr><tr><td>8be57cdad86fdf8c8290df4ca3149592f3c46dd3</td><td>m2vts</td><td>m2vts</td><td><a href="papers/8be57cdad86fdf8c8290df4ca3149592f3c46dd3.html" target="_blank">The M2VTS Multimodal Face Database (Release 1.00)</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>73</td><td>33</td><td>40</td><td>2</td><td>39</td><td>33</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html" target="_blank">UMDFaces: An annotated face dataset for training deep networks</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td>edu</td><td>University of Maryland</td><td>United States</td><td>39.28996850</td><td>-76.62196103</td><td>79%</td><td>42</td><td>33</td><td>9</td><td>2</td><td>30</td><td>11</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United 
States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html" target="_blank">End-to-End Deep Learning for Person Search</a></td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>46</td><td>32</td><td>14</td><td>0</td><td>27</td><td>16</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/18858cc936947fc96b5c06bbe3c6c2faa5614540.html" target="_blank">Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</a></td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>59</td><td>31</td><td>28</td><td>0</td><td>47</td><td>10</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html" target="_blank">UMB-DB: A database of partially occluded 3D faces</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>47</td><td>31</td><td>16</td><td>2</td><td>22</td><td>24</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html" target="_blank">Recognize complex events from static images by fusing deep channels</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>44</td><td>30</td><td>14</td><td>1</td><td>29</td><td>15</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html" target="_blank">The intrinsic memorability of face photographs.</a></td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td>Journal of experimental psychology. 
General</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>52</td><td>29</td><td>23</td><td>2</td><td>36</td><td>14</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html" target="_blank">The CMU Face In Action (FIA) Database</a></td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>54</td><td>29</td><td>25</td><td>5</td><td>40</td><td>16</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html" target="_blank">Level Playing Field for Million Scale Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>United States</td><td>47.65432380</td><td>-122.30800894</td><td>72%</td><td>39</td><td>28</td><td>11</td><td>2</td><td>29</td><td>9</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html" target="_blank">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>50</td><td>26</td><td>24</td><td>5</td><td>31</td><td>18</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html" target="_blank">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>0</td><td>18</td><td>16</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html" target="_blank">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>2</td><td>21</td><td>12</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html" target="_blank">Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>47</td><td>24</td><td>23</td><td>1</td><td>23</td><td>24</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/758d7e1be64cc668c59ef33ba8882c8597406e53.html" target="_blank">AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</a></td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>37</td><td>23</td><td>14</td><td>0</td><td>25</td><td>11</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html" target="_blank">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>2</td><td>26</td><td>15</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html" target="_blank">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>0</td><td>17</td><td>26</td></tr><tr><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/1bd1645a629f1b612960ab9bba276afd4cf7c666.html" target="_blank">End-to-End People Detection in Crowded Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Stanford University</td><td>United States</td><td>37.43131385</td><td>-122.16936535</td><td>55%</td><td>42</td><td>23</td><td>19</td><td>1</td><td>19</td><td>19</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html" target="_blank">Personalizing Human Video Pose Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Oxford University</td><td>United Kingdom</td><td>51.75208490</td><td>-1.25166460</td><td>64%</td><td>36</td><td>23</td><td>13</td><td>2</td><td>30</td><td>8</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html" target="_blank">IARPA Janus Benchmark-B Face Dataset</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Michigan State University</td><td>United States</td><td>42.71856800</td><td>-84.47791571</td><td>63%</td><td>35</td><td>22</td><td>13</td><td>3</td><td>25</td><td>8</td></tr><tr><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/a0cc5f73a37723a6dd465924143f1cb4976d0169.html" target="_blank">Person Transfer GAN to Bridge Domain Gap for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>92%</td><td>24</td><td>22</td><td>2</td><td>1</td><td>20</td><td>4</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html" target="_blank">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and 
Systems</td><td>edu</td><td>University of Notre Dame</td><td>United States</td><td>41.70456775</td><td>-86.23822026</td><td>63%</td><td>35</td><td>22</td><td>13</td><td>3</td><td>18</td><td>15</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>#N/A</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html" target="_blank">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>37</td><td>20</td><td>17</td><td>3</td><td>30</td><td>7</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>33</td><td>20</td><td>13</td><td>1</td><td>23</td><td>11</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>33</td><td>20</td><td>13</td><td>1</td><td>23</td><td>11</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html" target="_blank">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>35</td><td>20</td><td>15</td><td>2</td><td>21</td><td>14</td></tr><tr><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/41976ebc8ab76d9a6861487c97cc7fcbe3b6015f.html" target="_blank">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>29</td><td>20</td><td>9</td><td>2</td><td>27</td><td>2</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision 
Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html" target="_blank">Ear Recognition: More Than a Survey</a></td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" target="_blank">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>26</td><td>19</td><td>7</td><td>0</td><td>10</td><td>16</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html" target="_blank">Fashion Landmark Detection in the Wild</a></td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>26</td><td>19</td><td>7</td><td>1</td><td>16</td><td>10</td></tr><tr><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/64e0690dd176a93de9d4328f6e31fc4afe1e7536.html" target="_blank">Tracking Multiple People Online and in Real Time</a></td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>23</td><td>18</td><td>5</td><td>1</td><td>12</td><td>10</td></tr><tr><td>d818568838433a6d6831adde49a58cef05e0c89f</td><td>agedb</td><td>AgeDB</td><td><a href="papers/d818568838433a6d6831adde49a58cef05e0c89f.html" target="_blank">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Imperial College London</td><td>United Kingdom</td><td>51.49887085</td><td>-0.17560797</td><td>94%</td><td>18</td><td>17</td><td>1</td><td>0</td><td>14</td><td>3</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html" target="_blank">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>1</td><td>16</td><td>11</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html" target="_blank">Describing Common Human Visual Actions in Images</a></td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>25</td><td>17</td><td>8</td><td>0</td><td>23</td><td>2</td></tr><tr><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/d178cde92ab3dc0dd2ebee5a76a33d556c39448b.html" target="_blank">The jiku mobile video dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>National University of 
Singapore</td><td>Singapore</td><td>1.29620180</td><td>103.77689944</td><td>71%</td><td>24</td><td>17</td><td>7</td><td>0</td><td>6</td><td>19</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html" target="_blank">Genealogical face recognition based on UB KinFace database</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>United States</td><td>42.93362780</td><td>-78.88394479</td><td>55%</td><td>31</td><td>17</td><td>14</td><td>0</td><td>11</td><td>21</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html" target="_blank">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>0</td><td>16</td><td>10</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html" target="_blank">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>26</td><td>16</td><td>10</td><td>2</td><td>21</td><td>6</td></tr><tr><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td><td>umd_faces</td><td>UMD</td><td><a href="papers/71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6.html" target="_blank">The Do’s and Don’ts for CNN-Based Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision Workshops (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>26</td><td>16</td><td>10</td><td>2</td><td>16</td><td>8</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html" target="_blank">Three-dimensional face recognition: an eigensurface approach</a></td><td><span class="gray">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. 
ICIP '04.</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>38</td><td>16</td><td>22</td><td>4</td><td>24</td><td>13</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>erce</td><td>ERCe</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html" target="_blank">Learning Social Relation Traits from Face Images</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>23</td><td>14</td><td>9</td><td>4</td><td>16</td><td>7</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>fpoq</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html" target="_blank">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>81%</td><td>16</td><td>13</td><td>3</td><td>0</td><td>13</td><td>4</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html" target="_blank">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>18</td><td>13</td><td>5</td><td>0</td><td>14</td><td>4</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html" target="_blank">Fine-grained evaluation on face detection in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>12</td><td>5</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html" target="_blank">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>1</td><td>12</td><td>6</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html" target="_blank">YawDD: a yawning detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>15</td><td>12</td><td>3</td><td>1</td><td>2</td><td>13</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html" target="_blank">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>2</td><td>18</td><td>3</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html" target="_blank">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>22</td><td>11</td><td>11</td><td>3</td><td>11</td><td>10</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html" target="_blank">Iranian Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</a></td><td>2007 International Conference on Machine Vision</td><td>edu</td><td>Islamic Azad University</td><td>Iran</td><td>34.84529990</td><td>48.55962120</td><td>48%</td><td>23</td><td>11</td><td>12</td><td>2</td><td>14</td><td>9</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html" target="_blank">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><span class="gray">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td></td><td>79%</td><td>14</td><td>11</td><td>3</td><td>0</td><td>12</td><td>1</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html" target="_blank">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td>edu</td><td>BVBCET, Hubli, India</td><td>India</td><td>15.36883320</td><td>75.12137960</td><td>65%</td><td>17</td><td>11</td><td>6</td><td>0</td><td>11</td><td>5</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html" target="_blank">Recognizing disguised faces</a></td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>18</td><td>10</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html" target="_blank">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>3</td><td>18</td><td>9</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html" target="_blank">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>United States</td><td>34.22498270</td><td>-77.86907744</td><td>77%</td><td>13</td><td>10</td><td>3</td><td>0</td><td>6</td><td>8</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html" target="_blank">VADANA: A dense dataset for facial image analysis</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>University of Delaware</td><td>United States</td><td>39.68103280</td><td>-75.75401840</td><td>67%</td><td>15</td><td>10</td><td>5</td><td>0</td><td>5</td><td>10</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html" target="_blank">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>18</td><td>9</td><td>9</td><td>0</td><td>12</td><td>5</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html" target="_blank">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>20</td><td>9</td><td>11</td><td>0</td><td>9</td><td>11</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html" target="_blank">ChaLearn looking at people: A review of events and resources</a></td><td><span class="gray">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>13</td><td>9</td><td>4</td><td>1</td><td>8</td><td>4</td></tr><tr><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/a8d0b149c2eadaa02204d3e4356fbc8eccf3b315.html" target="_blank">Hi4D-ADSIP 3-D dynamic facial articulation database</a></td><td><span 
class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>1</td><td>4</td><td>11</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html" target="_blank">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>19</td><td>9</td><td>10</td><td>2</td><td>6</td><td>13</td></tr><tr><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td><td>caltech_crp</td><td>Caltech CRP</td><td><a href="papers/060820f110a72cbf02c14a6d1085bd6e1d994f6a.html" target="_blank">Fine-grained classification of pedestrians in video: Benchmark and state of the art</a></td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>17</td><td>8</td><td>9</td><td>0</td><td>9</td><td>8</td></tr><tr><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2b926b3586399d028b46315d7d9fb9d879e4f79c.html" target="_blank">Multimodal 2D, 2.5D & 3D Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2006 International Conference on Image Processing</td><td>edu</td><td>Universidad Rey Juan Carlos, Spain</td><td>Spain</td><td>40.33586610</td><td>-3.87694320</td><td>57%</td><td>14</td><td>8</td><td>6</td><td>0</td><td>2</td><td>12</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html" target="_blank">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>16</td><td>8</td><td>8</td><td>1</td><td>10</td><td>6</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html" target="_blank">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><span class="gray">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>Canada</td><td>45.50397610</td><td>-73.57496870</td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>13</td><td>7</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html" target="_blank">How to Take a Good Selfie?</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>11</td><td>8</td><td>3</td><td>0</td><td>7</td><td>5</td></tr><tr><td>633c851ebf625ad7abdda2324e9de093cf623141</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/633c851ebf625ad7abdda2324e9de093cf623141.html" target="_blank">Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</a></td><td><span class="gray">[pdf]</a></td><td>2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 
2017)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>10</td><td>7</td><td>3</td><td>0</td><td>8</td><td>3</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/2d45cfd838016a6e39f6b766ffe85acd649440c7.html" target="_blank">Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html" target="_blank">Competitive affective gaming: winning with a smile</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>Portugal</td><td>38.66096400</td><td>-9.20581300</td><td>78%</td><td>9</td><td>7</td><td>2</td><td>0</td><td>5</td><td>4</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>ufi</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html" target="_blank">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>12</td><td>6</td><td>6</td><td>0</td><td>4</td><td>6</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html" target="_blank">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>11</td><td>6</td><td>5</td><td>0</td><td>5</td><td>4</td></tr><tr><td>4af89578ac237278be310f7660a408b03f12d603</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/4af89578ac237278be310f7660a408b03f12d603.html" target="_blank">Large-scale geo-facial image analysis</a></td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>6</td><td>6</td><td>0</td><td>0</td><td>4</td><td>2</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html" target="_blank">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>86%</td><td>7</td><td>6</td><td>1</td><td>1</td><td>5</td><td>2</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html" target="_blank">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>14</td><td>5</td><td>9</td><td>0</td><td>12</td><td>1</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html" target="_blank">Large scale unconstrained open set face database</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>company</td><td>Securics Inc., Colorado Springs, CO</td><td>United States</td><td>38.83388160</td><td>-104.82136340</td><td>83%</td><td>6</td><td>5</td><td>1</td><td>0</td><td>4</td><td>2</td></tr><tr><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td><td>uccs</td><td>UCCS</td><td><a href="papers/d4f1eb008eb80595bcfdac368e23ae9754e1e745.html" target="_blank">Unconstrained Face Detection and Open-Set Face Recognition Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>4</td><td>1</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html" target="_blank">USED: a large-scale social event detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Trento</td><td>Italy</td><td>46.06588360</td><td>11.11598940</td><td>71%</td><td>7</td><td>5</td><td>2</td><td>0</td><td>3</td><td>4</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html" target="_blank">Re-identification of pedestrians with variable occlusion and scale</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>Kingston University</td><td>United Kingdom</td><td>51.42930860</td><td>-0.26840440</td><td>56%</td><td>9</td><td>5</td><td>4</td><td>1</td><td>5</td><td>4</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html" target="_blank">Faces in Places: compound query retrieval</a></td><td><a 
href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>3</td><td>2</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html" target="_blank">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>6</td><td>3</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html" target="_blank">A Multi-modal Graphical Model for Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>0</td><td>5</td><td>3</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>erce</td><td>ERCe</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html" target="_blank">Visual Kinship Recognition of Families in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>University of Massachusetts Dartmouth</td><td>United States</td><td>41.62772475</td><td>-71.00724501</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>2</td><td>3</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html" target="_blank">Is the eye region more reliable than the face? 
A preliminary study of face-based recognition on a transgender dataset</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>1</td><td>3</td><td>5</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html" target="_blank">Large age-gap face verification by feature injection in deep networks</a></td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>3</td><td>4</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html" target="_blank">Spoofing faces using makeup: An investigative study</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>France</td><td>43.61581310</td><td>7.06838000</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>0</td><td>1</td><td>5</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html" target="_blank">PETS 2017: Dataset and Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>1</td><td>8</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html" target="_blank">ReSEED: social event dEtection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>1</td><td>5</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td><td>imdb_face</td><td>IMDb Face</td><td><a href="papers/9e31e77f9543ab42474ba4e9330676e18c242e72.html" target="_blank">The Devil of Face Recognition is in the Noise</a></td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Nanyang Technological University</td><td>Singapore</td><td>1.34841040</td><td>103.68297965</td><td>50%</td><td>6</td><td>3</td><td>3</td><td>0</td><td>4</td><td>1</td></tr><tr><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td><td>mafa</td><td>MAsked FAces</td><td><a href="papers/9cc8cf0c7d7fa7607659921b6ff657e17e135ecc.html" target="_blank">Detecting Masked Faces in the Wild with LLE-CNNs</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition 
(CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>5</td><td>3</td><td>2</td><td>1</td><td>4</td><td>1</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td><td>mr2</td><td>MR2</td><td><a href="papers/578d4ad74818086bb64f182f72e2c8bd31e3d426.html" target="_blank">The MR2: A multi-racial, mega-resolution database of facial stimuli.</a></td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>7</td><td>0</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html" target="_blank">Investigating Open-World Person Re-identification Using a Drone</a></td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>5</td><td>2</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html" target="_blank">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td><td>gfw</td><td>Grouping Face in the Wild</td><td><a href="papers/e58dd160a76349d46f881bd6ddbc2921f08d1050.html" target="_blank">Merge or Not? 
Learning to Group Faces via Imitation Learning</a></td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>2</td><td>0</td></tr><tr><td>4eab317b5ac436a949849ed286baa3de2a541eef</td><td>laofiw</td><td>LAOFIW</td><td><a href="papers/4eab317b5ac436a949849ed286baa3de2a541eef.html" target="_blank">Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</a></td><td><a href="https://arxiv.org/pdf/1809.02169.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>2</td><td>0</td></tr><tr><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td><td>megaage</td><td>MegaAge</td><td><a href="papers/c72a2ea819df9b0e8cd267eebcc6528b8741e03d.html" target="_blank">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>1</td><td>4</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html" target="_blank">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>3531332efe19be21e7401ba1f04570a142617236</td><td>ufdd</td><td>UFDD</td><td><a href="papers/3531332efe19be21e7401ba1f04570a142617236.html" target="_blank">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>1</td><td>4</td><td>0</td></tr><tr><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461.html" target="_blank">A 3D Dynamic Database for Unconstrained Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43.html" target="_blank">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html" target="_blank">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Winter Conference 
on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html" target="_blank">Indian Face Age Database: A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html" target="_blank">Crowdsourcing facial expressions for affective-interaction</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/2306b2a8fba28539306052764a77a0d0f5d1236a.html" target="_blank">Surveillance Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html" target="_blank">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td>WLFDB</td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html" target="_blank">WLFDB : Weakly Labeled Face Databases</a></td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/7b92d1e53cc87f7a4256695de590098a2f30261e.html" target="_blank">From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html" target="_blank">Pedestrian detection: A benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/15e1af79939dbf90790b03d8aa02477783fb1d0f.html" target="_blank">Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</a></td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>72a155c987816ae81c858fddbd6beab656d86220</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/72a155c987816ae81c858fddbd6beab656d86220.html" target="_blank">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html" target="_blank">Face swapping: automatically replacing faces in photographs</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/12ad3b5bbbf407f8e54ea692c07633d1a867c566.html" target="_blank">Object recognition using segmentation for feature detection</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 17th International Conference on Pattern Recognition, 2004. ICPR 2004.</td><td>edu</td><td>Inst. of Comput. Sci., Univ. 
of Leoben, Austria</td><td>Austria</td><td>47.38473720</td><td>15.09302010</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html" target="_blank">HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</a></td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td><td>ibm_dif</td><td>IBM Diversity in Faces</td><td><a href="papers/0ab7cff2ccda7269b73ff6efd9d37e1318f7db25.html" target="_blank">Facial Coding Scheme Reference 1 Craniofacial Distances</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html" target="_blank">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html" target="_blank">Understanding images of groups of people</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfpw</td><td>LFPW</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html" target="_blank">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html" target="_blank">Component-Based Face Recognition with 3D Morphable Models</a></td><td><span class="gray">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html" target="_blank">Names and faces in the news</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td><td>scut_head</td><td>SCUT HEAD</td><td><a href="papers/d3200d49a19a4a4e4e9745ee39649b65d80c834b.html" target="_blank">Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</a></td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td>2018 24th International Conference on Pattern Recognition (ICPR)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>8990cdce3f917dad622e43e033db686b354d057c</td><td>tiny_faces</td><td>TinyFace</td><td><a href="papers/8990cdce3f917dad622e43e033db686b354d057c.html" target="_blank">Low-Resolution Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-MotionPairs</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html" target="_blank">VQA: Visual Question Answering</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html" target="_blank">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Kentucky</td><td>United States</td><td>38.03337420</td><td>-84.50177580</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/36bccfb2ad847096bc76777e544f305813cd8f5b.html" target="_blank">WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr></table></body></html> \ No newline at end 
of file
diff --git a/scraper/reports/report_index.html b/scraper/reports/report_index.html
index e74ba313..a3294082 100644
--- a/scraper/reports/report_index.html
+++ b/scraper/reports/report_index.html
@@ -1 +1 @@
-<!doctype html><html><head><meta charset='utf-8'><title>All Papers</title><link rel='stylesheet' href='reports.css'></head><body><h2>All Papers</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Country</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html" target="_blank">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>387</td><td>249</td><td>138</td><td>21</td><td>291</td><td>96</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html" target="_blank">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>129</td><td>75</td><td>54</td><td>9</td><td>74</td><td>55</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html" target="_blank">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>323</td><td>211</td><td>112</td><td>27</td><td>208</td><td>120</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html" target="_blank">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>133</td><td>82</td><td>51</td><td>9</td><td>73</td><td>58</td></tr><tr><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43.html" target="_blank">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html" target="_blank">80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>999</td><td>573</td><td>426</td><td>89</td><td>644</td><td>337</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a 
href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html" target="_blank">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>2</td><td>26</td><td>15</td></tr><tr><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461.html" target="_blank">A 3D Dynamic Database for Unconstrained Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html" target="_blank">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>343</td><td>189</td><td>154</td><td>25</td><td>223</td><td>114</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html" target="_blank">A 3D facial expression database for facial behavior research</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>588</td><td>313</td><td>274</td><td>45</td><td>306</td><td>282</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html" target="_blank">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><span class="gray">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>65</td><td>38</td><td>27</td><td>6</td><td>45</td><td>20</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html" target="_blank">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>50</td><td>26</td><td>24</td><td>5</td><td>31</td><td>18</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html" target="_blank">A Multi-modal Graphical Model for Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>0</td><td>5</td><td>3</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html" target="_blank">A Richly Annotated Dataset for Pedestrian 
Attribute Recognition</a></td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>0</td><td>16</td><td>10</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html" target="_blank">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>184</td><td>114</td><td>70</td><td>14</td><td>120</td><td>67</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html" target="_blank">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>95</td><td>63</td><td>32</td><td>6</td><td>50</td><td>45</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html" target="_blank">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>20</td><td>9</td><td>11</td><td>0</td><td>9</td><td>11</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html" target="_blank">A data-driven approach to cleaning large face datasets</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>138</td><td>88</td><td>50</td><td>1</td><td>95</td><td>41</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontaneous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html" target="_blank">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>United States</td><td>42.08779975</td><td>-75.97066066</td><td>49%</td><td>154</td><td>76</td><td>78</td><td>7</td><td>80</td><td>75</td></tr><tr><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td><td>fei</td><td>FEI</td><td><a href="papers/8b56e33f33e582f3e473dba573a16b598ed9bcdc.html" target="_blank">A new ranking method for principal components analysis and its application to face image analysis</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>169</td><td>93</td><td>76</td><td>6</td><td>69</td><td>102</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html" target="_blank">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a 
href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>18</td><td>9</td><td>9</td><td>0</td><td>12</td><td>5</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html" target="_blank">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>554</td><td>445</td><td>94</td><td>495</td><td>491</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html" target="_blank">Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>47</td><td>24</td><td>23</td><td>1</td><td>23</td><td>24</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/758d7e1be64cc668c59ef33ba8882c8597406e53.html" target="_blank">AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</a></td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>37</td><td>23</td><td>14</td><td>0</td><td>25</td><td>11</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html" target="_blank">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>83</td><td>38</td><td>45</td><td>6</td><td>43</td><td>39</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html" target="_blank">Age and Gender Estimation of Unfiltered Faces</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>59%</td><td>179</td><td>105</td><td>74</td><td>6</td><td>98</td><td>80</td></tr><tr><td>d818568838433a6d6831adde49a58cef05e0c89f</td><td>agedb</td><td>AgeDB</td><td><a href="papers/d818568838433a6d6831adde49a58cef05e0c89f.html" target="_blank">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Imperial College London</td><td>United Kingdom</td><td>51.49887085</td><td>-0.17560797</td><td>89%</td><td>18</td><td>16</td><td>2</td><td>0</td><td>14</td><td>3</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html" target="_blank">Annotated Facial Landmarks in the Wild: A 
large-scale, real-world database for facial landmark localization</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>60%</td><td>318</td><td>192</td><td>126</td><td>34</td><td>211</td><td>107</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html" target="_blank">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>91</td><td>57</td><td>34</td><td>5</td><td>60</td><td>31</td></tr><tr><td>633c851ebf625ad7abdda2324e9de093cf623141</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/633c851ebf625ad7abdda2324e9de093cf623141.html" target="_blank">Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</a></td><td><span class="gray">[pdf]</a></td><td>2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>10</td><td>7</td><td>3</td><td>0</td><td>8</td><td>3</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html" target="_blank">Appearance-based gaze estimation in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>149</td><td>108</td><td>41</td><td>3</td><td>94</td><td>54</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html" target="_blank">Attribute and simile classifiers for face verification</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>914</td><td>574</td><td>340</td><td>48</td><td>586</td><td>316</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html" target="_blank">Automated Human Identification Using Ear Imaging</a></td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>80</td><td>39</td><td>41</td><td>6</td><td>35</td><td>44</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html" target="_blank">Automatic 3D face authentication</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>100</td><td>54</td><td>46</td><td>8</td><td>63</td><td>36</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html" target="_blank">Automatic and Efficient Human Pose 
Estimation for Sign Language Videos</a></td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>1</td><td>16</td><td>11</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html" target="_blank">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>89%</td><td>54</td><td>48</td><td>5</td><td>1</td><td>41</td><td>12</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html" target="_blank">Bosphorus Database for 3D Face Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>352</td><td>196</td><td>156</td><td>17</td><td>162</td><td>188</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United 
States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html" target="_blank">ChaLearn looking at people: A review of events and resources</a></td><td><span class="gray">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>13</td><td>7</td><td>6</td><td>1</td><td>8</td><td>4</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html" target="_blank">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>60</td><td>43</td><td>17</td><td>0</td><td>34</td><td>28</td></tr><tr><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/22ad2c8c0f4d6aa4328b38d894b814ec22579761.html" target="_blank">Clothing cosegmentation for recognizing people</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>63%</td><td>178</td><td>113</td><td>65</td><td>7</td><td>100</td><td>86</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>285</td><td>182</td><td>103</td><td>13</td><td>197</td><td>93</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>285</td><td>182</td><td>103</td><td>13</td><td>197</td><td>93</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html" target="_blank">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>899</td><td>508</td><td>391</td><td>51</td><td>431</td><td>451</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html" target="_blank">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><span class="gray">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National 
University</td><td>Australia</td><td>-35.27769990</td><td>149.11852700</td><td>63%</td><td>181</td><td>114</td><td>67</td><td>8</td><td>87</td><td>97</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html" target="_blank">Competitive affective gaming: winning with a smile</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>Portugal</td><td>38.66096400</td><td>-9.20581300</td><td>78%</td><td>9</td><td>7</td><td>2</td><td>0</td><td>5</td><td>4</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html" target="_blank">Component-Based Face Recognition with 3D Morphable Models</a></td><td><span class="gray">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html" target="_blank">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>551</td><td>448</td><td>70</td><td>540</td><td>439</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html" target="_blank">Consistent Re-identification in a Camera Network</a></td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>49</td><td>35</td><td>14</td><td>3</td><td>34</td><td>13</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>33</td><td>21</td><td>12</td><td>1</td><td>23</td><td>11</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>33</td><td>21</td><td>12</td><td>1</td><td>23</td><td>11</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html" target="_blank">Crowdsourcing facial expressions for affective-interaction</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image 
Understanding</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html" target="_blank">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>122</td><td>79</td><td>43</td><td>6</td><td>75</td><td>48</td></tr><tr><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td><td>disfa</td><td>DISFA</td><td><a href="papers/5a5f0287484f0d480fed1ce585dbf729586f0edc.html" target="_blank">DISFA: A Spontaneous Facial Action Intensity Database</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Affective Computing</td><td>edu</td><td>University of Denver</td><td>United States</td><td>39.67665410</td><td>-104.96220300</td><td>52%</td><td>184</td><td>95</td><td>89</td><td>19</td><td>96</td><td>89</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html" target="_blank">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>60%</td><td>145</td><td>87</td><td>58</td><td>10</td><td>93</td><td>51</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html" target="_blank">Deep Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>999</td><td>629</td><td>370</td><td>51</td><td>558</td><td>429</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html" target="_blank">Deep Learning Face Attributes in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>56%</td><td>919</td><td>514</td><td>404</td><td>62</td><td>694</td><td>201</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html" target="_blank">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>176</td><td>113</td><td>63</td><td>2</td><td>113</td><td>62</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html" target="_blank">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE 
Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>568</td><td>411</td><td>157</td><td>19</td><td>320</td><td>235</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html" target="_blank">Depth and Appearance for Mobile Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>62%</td><td>324</td><td>202</td><td>122</td><td>26</td><td>193</td><td>127</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html" target="_blank">Describing Common Human Visual Actions in Images</a></td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>25</td><td>18</td><td>7</td><td>0</td><td>23</td><td>2</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>bpad</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html" target="_blank">Describing people: A poselet-based approach to attribute classification</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>230</td><td>138</td><td>92</td><td>14</td><td>163</td><td>66</td></tr><tr><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td><td>scut_head</td><td>SCUT HEAD</td><td><a href="papers/d3200d49a19a4a4e4e9745ee39649b65d80c834b.html" target="_blank">Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</a></td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td>2018 24th International Conference on Pattern Recognition (ICPR)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td><td>mafa</td><td>MAsked FAces</td><td><a href="papers/9cc8cf0c7d7fa7607659921b6ff657e17e135ecc.html" target="_blank">Detecting Masked Faces in the Wild with LLE-CNNs</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>5</td><td>3</td><td>2</td><td>1</td><td>4</td><td>1</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html" target="_blank">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>6</td><td>3</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html" target="_blank">Ear Recognition: More Than a Survey</a></td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" 
target="_blank">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>26</td><td>19</td><td>7</td><td>0</td><td>10</td><td>16</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw</td><td>LFW</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html" target="_blank">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>183</td><td>107</td><td>76</td><td>14</td><td>103</td><td>77</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html" target="_blank">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>86</td><td>45</td><td>41</td><td>7</td><td>54</td><td>29</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html" target="_blank">End-to-End Deep Learning for Person Search</a></td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>46</td><td>32</td><td>14</td><td>0</td><td>27</td><td>16</td></tr><tr><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/1bd1645a629f1b612960ab9bba276afd4cf7c666.html" target="_blank">End-to-End People Detection in Crowded Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Stanford University</td><td>United States</td><td>37.43131385</td><td>-122.16936535</td><td>55%</td><td>42</td><td>23</td><td>19</td><td>1</td><td>19</td><td>19</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html" target="_blank">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of California, Santa Cruz</td><td>United States</td><td>36.99158470</td><td>-122.05827710</td><td>66%</td><td>624</td><td>414</td><td>210</td><td>33</td><td>342</td><td>276</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html" target="_blank">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>632</td><td>366</td><td>264</td><td>44</td><td>358</td><td>264</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html" target="_blank">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>14</td><td>5</td><td>9</td><td>0</td><td>12</td><td>1</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html" target="_blank">Exploring Models and Data for Image Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>206</td><td>126</td><td>80</td><td>11</td><td>162</td><td>39</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html" target="_blank">FDDB: A benchmark for face detection in unconstrained settings</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>380</td><td>237</td><td>143</td><td>20</td><td>202</td><td>164</td></tr><tr><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td><td>feret</td><td>FERET</td><td><a href="papers/31de9b3dd6106ce6eec9a35991b2b9083395fd0b.html" target="_blank">FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</a></td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>75</td><td>39</td><td>36</td><td>5</td><td>54</td><td>20</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html" target="_blank">Face detection, pose estimation, and landmark localization in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern
Recognition</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>999</td><td>649</td><td>350</td><td>44</td><td>576</td><td>422</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html" target="_blank">Face recognition in unconstrained videos with matched background similarity</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Tel Aviv University</td><td>Israel</td><td>32.11198890</td><td>34.80459702</td><td>66%</td><td>509</td><td>338</td><td>170</td><td>24</td><td>294</td><td>216</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html" target="_blank">Face swapping: automatically replacing faces in photographs</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6204776d31359d129a582057c2d788a14f8aadeb</td><td>youtube_celebrities</td><td>YouTube Celebrities</td><td><a href="papers/6204776d31359d129a582057c2d788a14f8aadeb.html" target="_blank">Face tracking and recognition with visual constraints in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Rutgers University</td><td>United States</td><td>40.47913175</td><td>-74.43168868</td><td>56%</td><td>267</td><td>149</td><td>117</td><td>13</td><td>125</td><td>121</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html" target="_blank">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>225</td><td>139</td><td>86</td><td>17</td><td>146</td><td>77</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html" target="_blank">Faces in Places: compound query retrieval</a></td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>3</td><td>2</td></tr><tr><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td><td>ibm_dif</td><td>IBM Diversity in Faces</td><td><a href="papers/0ab7cff2ccda7269b73ff6efd9d37e1318f7db25.html" target="_blank">Facial Coding Scheme Reference 1 Craniofacial Distances</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>407</td><td>259</td><td>148</td><td>18</td><td>252</td><td>153</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>407</td><td>259</td><td>148</td><td>18</td><td>252</td><td>153</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html" target="_blank">Fashion Landmark Detection in the Wild</a></td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>26</td><td>19</td><td>7</td><td>1</td><td>16</td><td>10</td></tr><tr><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td><td>caltech_crp</td><td>Caltech CRP</td><td><a href="papers/060820f110a72cbf02c14a6d1085bd6e1d994f6a.html" target="_blank">Fine-grained classification of pedestrians in video: Benchmark and state of the art</a></td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>17</td><td>8</td><td>9</td><td>0</td><td>9</td><td>8</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html" target="_blank">Fine-grained evaluation on face detection in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>12</td><td>5</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html" target="_blank">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>148</td><td>99</td><td>49</td><td>7</td><td>105</td><td>43</td></tr><tr><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/7b92d1e53cc87f7a4256695de590098a2f30261e.html" target="_blank">From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html" target="_blank">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>1</td><td>12</td><td>6</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html" target="_blank">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>11</td><td>6</td><td>5</td><td>0</td><td>5</td><td>4</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html" target="_blank">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>999</td><td>560</td><td>439</td><td>67</td><td>498</td><td>462</td></tr><tr><td>06f02199690961ba52997cde1527e714d2b3bf8f</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/06f02199690961ba52997cde1527e714d2b3bf8f.html" target="_blank">Gaze locking: passive eye contact detection for human-object interaction</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>76%</td><td>79</td><td>60</td><td>19</td><td>0</td><td>49</td><td>34</td></tr><tr><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/18858cc936947fc96b5c06bbe3c6c2faa5614540.html" target="_blank">Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</a></td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>59</td><td>30</td><td>29</td><td>0</td><td>47</td><td>10</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html" target="_blank">Genealogical face recognition based on UB KinFace database</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>United States</td><td>42.93362780</td><td>-78.88394479</td><td>55%</td><td>31</td><td>17</td><td>14</td><td>0</td><td>11</td><td>21</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html" target="_blank">Generic object recognition with boosting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>53%</td><td>293</td><td>155</td><td>138</td><td>16</td><td>195</td><td>97</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html" target="_blank">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html" target="_blank">HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</a></td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/a8d0b149c2eadaa02204d3e4356fbc8eccf3b315.html" target="_blank">Hi4D-ADSIP 3-D dynamic facial articulation database</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>1</td><td>4</td><td>11</td></tr><tr><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/2d45cfd838016a6e39f6b766ffe85acd649440c7.html" target="_blank">Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>8</td><td>6</td><td>2</td><td>1</td><td>5</td><td>3</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html" target="_blank">High Five: Recognising human interactions in TV shows</a></td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>98</td><td>56</td><td>42</td><td>10</td><td>66</td><td>28</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html" target="_blank">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>95</td><td>61</td><td>34</td><td>4</td><td>59</td><td>35</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html" target="_blank">Histograms of oriented gradients for human detection</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>INRIA Rhone-Alps, Montbonnot, France</td><td>France</td><td>45.21788600</td><td>5.80736900</td><td>57%</td><td>999</td><td>570</td><td>429</td><td>43</td><td>419</td><td>509</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html" target="_blank">How to Take a Good Selfie?</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>11</td><td>8</td><td>3</td><td>0</td><td>7</td><td>5</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html" target="_blank">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>18</td><td>13</td><td>5</td><td>0</td><td>14</td><td>4</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html" target="_blank">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>280</td><td>193</td><td>87</td><td>9</td><td>139</td><td>137</td></tr><tr><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td><td>pa_100k</td><td>PA-100K</td><td><a href="papers/f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44.html" target="_blank">HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>55</td><td>41</td><td>14</td><td>0</td><td>36</td><td>17</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html" target="_blank">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><span class="gray">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>14</td><td>10</td><td>4</td><td>0</td><td>12</td><td>1</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html" target="_blank">IARPA Janus Benchmark-B Face Dataset</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Michigan State University</td><td>United States</td><td>42.71856800</td><td>-84.47791571</td><td>60%</td><td>35</td><td>21</td><td>14</td><td>3</td><td>25</td><td>8</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html" target="_blank">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>35</td><td>20</td><td>15</td><td>2</td><td>21</td><td>14</td></tr><tr><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/7f23a4bb0c777dd72cca7665a5f370ac7980217e.html" target="_blank">Improving Person Re-identification by Attribute and Identity Learning</a></td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>84%</td><td>87</td><td>73</td><td>14</td><td>0</td><td>43</td><td>42</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html" target="_blank">Indian Face Age Database: A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html" target="_blank">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td>edu</td><td>BVBCET, Hubli, India</td><td>India</td><td>15.36883320</td><td>75.12137960</td><td>59%</td><td>17</td><td>10</td><td>7</td><td>0</td><td>11</td><td>5</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html" target="_blank">Interactive Facial Feature Localization</a></td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Adobe</td><td>United States</td><td>37.33077030</td><td>-121.89409510</td><td>60%</td><td>352</td><td>211</td><td>141</td><td>26</td><td>212</td><td>146</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html" target="_blank">Investigating Open-World Person Re-identification Using a Drone</a></td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>5</td><td>2</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html" target="_blank">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>United States</td><td>34.22498270</td><td>-77.86907744</td><td>77%</td><td>13</td><td>10</td><td>3</td><td>0</td><td>6</td><td>8</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html" target="_blank">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html" target="_blank">Iranian 
Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</a></td><td>2007 International Conference on Machine Vision</td><td>edu</td><td>Islamic Azad University</td><td>Iran</td><td>34.84529990</td><td>48.55962120</td><td>39%</td><td>23</td><td>9</td><td>14</td><td>2</td><td>14</td><td>9</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html" target="_blank">Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>1</td><td>3</td><td>5</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html" target="_blank">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td>edu</td><td>University of North Carolina at Chapel Hill</td><td>United States</td><td>35.91139710</td><td>-79.05045290</td><td>61%</td><td>82</td><td>50</td><td>32</td><td>6</td><td>28</td><td>52</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html" target="_blank">Kinship Verification through Transfer Learning</a></td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>71</td><td>41</td><td>30</td><td>2</td><td>29</td><td>42</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html" target="_blank">Labeled Faces in the Wild: Updates and New Reporting Procedures</a></td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Massachusetts</td><td>United States</td><td>42.38897850</td><td>-72.52869870</td><td>67%</td><td>123</td><td>83</td><td>40</td><td>3</td><td>71</td><td>51</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html" target="_blank">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>999</td><td>617</td><td>382</td><td>63</td><td>598</td><td>382</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html" target="_blank">Labeled Faces in the Wild: A Survey</a></td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Stevens Institute of Technology</td><td>United
States</td><td>40.74225200</td><td>-74.02709490</td><td>61%</td><td>109</td><td>66</td><td>43</td><td>8</td><td>66</td><td>43</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html" target="_blank">Large age-gap face verification by feature injection in deep networks</a></td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>3</td><td>4</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html" target="_blank">Large scale unconstrained open set face database</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>company</td><td>Securics Inc., Colorado Springs, CO</td><td>United States</td><td>38.83388160</td><td>-104.82136340</td><td>83%</td><td>6</td><td>5</td><td>1</td><td>0</td><td>4</td><td>2</td></tr><tr><td>4af89578ac237278be310f7660a408b03f12d603</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/4af89578ac237278be310f7660a408b03f12d603.html" target="_blank">Large-scale geo-facial image analysis</a></td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>6</td><td>6</td><td>0</td><td>0</td><td>4</td><td>2</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>108</td><td>61</td><td>47</td><td>11</td><td>66</td><td>44</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>108</td><td>61</td><td>47</td><td>11</td><td>66</td><td>44</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html" target="_blank">Learning Face Representation from Scratch</a></td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese Academy of Sciences</td><td>China</td><td>40.00447950</td><td>116.37023800</td><td>70%</td><td>476</td><td>331</td><td>145</td><td>20</td><td>290</td><td>182</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html" target="_blank">Learning Social Relation Traits from Face Images</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>23</td><td>14</td><td>9</td><td>4</td><td>16</td><td>7</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html" target="_blank">Learning effective human pose estimation from inaccurate annotation</a></td><td><span class="gray">[pdf]</span></td><td>CVPR 2011</td><td>edu</td><td>University of Leeds</td><td>United Kingdom</td><td>53.80387185</td><td>-1.55245712</td><td>69%</td><td>169</td><td>117</td><td>52</td><td>8</td><td>108</td><td>65</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>erce</td><td>ERCe</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td><td>mot</td><td>MOT</td><td><a href="papers/5981e6479c3fd4e31644db35d236bfb84ae46514.html" target="_blank">Learning to associate: HybridBoosted multi-target tracker for crowded scene</a></td><td><span class="gray">[pdf]</span></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of Southern California</td><td>United States</td><td>34.02241490</td><td>-118.28634407</td><td>61%</td><td>326</td><td>200</td><td>125</td><td>22</td><td>190</td><td>137</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>369</td><td>225</td><td>143</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>369</td><td>225</td><td>143</td><td>32</td><td>237</td><td>131</td></tr>
<tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html" target="_blank">Level Playing Field for Million Scale Face Recognition</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>United States</td><td>47.65432380</td><td>-122.30800894</td><td>67%</td><td>39</td><td>26</td><td>13</td><td>2</td><td>29</td><td>9</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html" target="_blank">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>197</td><td>133</td><td>64</td><td>15</td><td>108</td><td>88</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfpw</td><td>LFPW</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html" target="_blank">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html" target="_blank">Locally Aligned Feature Transforms across Views</a></td><td><span class="gray">[pdf]</span></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>258</td><td>163</td><td>95</td><td>15</td><td>136</td><td>117</td></tr><tr><td>8990cdce3f917dad622e43e033db686b354d057c</td><td>tiny_faces</td><td>TinyFace</td><td><a href="papers/8990cdce3f917dad622e43e033db686b354d057c.html" target="_blank">Low-Resolution Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html" target="_blank">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>168</td><td>115</td><td>53</td><td>4</td><td>97</td><td>69</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</span></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>North Carolina University</td><td>United
States</td><td>34.22398690</td><td>-77.87013250</td><td>55%</td><td>437</td><td>239</td><td>197</td><td>24</td><td>228</td><td>203</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</span></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>North Carolina University</td><td>United States</td><td>34.22398690</td><td>-77.87013250</td><td>55%</td><td>437</td><td>239</td><td>197</td><td>24</td><td>228</td><td>203</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html" target="_blank">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>77%</td><td>180</td><td>138</td><td>42</td><td>9</td><td>120</td><td>59</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html" target="_blank">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>67</td><td>36</td><td>31</td><td>4</td><td>29</td><td>28</td></tr><tr><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td><td>gfw</td><td>Grouping Face in the Wild</td><td><a href="papers/e58dd160a76349d46f881bd6ddbc2921f08d1050.html" target="_blank">Merge or Not?
Learning to Group Faces via Imitation Learning</a></td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html" target="_blank">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>81%</td><td>16</td><td>13</td><td>3</td><td>0</td><td>13</td><td>4</td></tr><tr><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td><td>coco</td><td>COCO</td><td><a href="papers/5e0f8c355a37a5a89351c02f174e7a5ddcb98683.html" target="_blank">Microsoft COCO: Common Objects in Context</a></td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>606</td><td>393</td><td>25</td><td>722</td><td>259</td></tr><tr><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/41976ebc8ab76d9a6861487c97cc7fcbe3b6015f.html" target="_blank">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>29</td><td>20</td><td>9</td><td>2</td><td>27</td><td>2</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</span></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>60%</td><td>311</td><td>186</td><td>125</td><td>34</td><td>208</td><td>105</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</span></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>60%</td><td>311</td><td>186</td><td>125</td><td>34</td><td>208</td><td>105</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html" target="_blank">Multi-camera activity correlation analysis</a></td><td><span class="gray">[pdf]</span></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>69%</td><td>142</td><td>98</td><td>44</td><td>7</td><td>77</td><td>64</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span
class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html" target="_blank">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>148</td><td>100</td><td>48</td><td>5</td><td>80</td><td>65</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html" target="_blank">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>2</td><td>21</td><td>6</td></tr><tr><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2b926b3586399d028b46315d7d9fb9d879e4f79c.html" target="_blank">Multimodal 2D, 2.5D & 3D Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2006 International Conference on Image Processing</td><td>edu</td><td>Universidad Rey Juan Carlos, Spain</td><td>Spain</td><td>40.33586610</td><td>-3.87694320</td><td>57%</td><td>14</td><td>8</td><td>6</td><td>0</td><td>2</td><td>12</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html" target="_blank">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>0</td><td>17</td><td>26</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html" target="_blank">Names and faces in the news</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html" target="_blank">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>22</td><td>11</td><td>11</td><td>3</td><td>11</td><td>10</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html" target="_blank">Object Detection Combining Recognition and Segmentation</a></td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>105</td><td>64</td><td>41</td><td>9</td><td>58</td><td>43</td></tr><tr><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/12ad3b5bbbf407f8e54ea692c07633d1a867c566.html" target="_blank">Object recognition using segmentation for feature detection</a></td><td><span class="gray">[pdf]</span></td><td>Proceedings of the 17th International Conference on Pattern Recognition, 2004. ICPR 2004.</td><td>edu</td><td>Inst. of Comput. Sci., Univ. of Leoben, Austria</td><td>Austria</td><td>47.38473720</td><td>15.09302010</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html" target="_blank">On a Large Sequence-Based Human Gait Database</a></td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>150</td><td>95</td><td>55</td><td>17</td><td>103</td><td>51</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html" target="_blank">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>78</td><td>40</td><td>38</td><td>8</td><td>44</td><td>31</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf"
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html" target="_blank">Overview of the face recognition grand challenge</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>NIST</td><td>United States</td><td>39.14004000</td><td>-77.21850600</td><td>57%</td><td>999</td><td>565</td><td>433</td><td>86</td><td>549</td><td>442</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html" target="_blank">PETS 2017: Dataset and Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>1</td><td>8</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html" target="_blank">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><span class="gray">[pdf]</a></td><td>Face and Gesture 2011</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>54%</td><td>189</td><td>102</td><td>87</td><td>22</td><td>108</td><td>78</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html" target="_blank">Parameterisation of a stochastic model for human face identification</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>999</td><td>500</td><td>499</td><td>94</td><td>543</td><td>427</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html" target="_blank">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>138</td><td>86</td><td>52</td><td>6</td><td>76</td><td>63</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision 
Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html" target="_blank">Pedestrian Attribute Recognition At Far Distance</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>88</td><td>64</td><td>24</td><td>1</td><td>50</td><td>36</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html" target="_blank">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>60%</td><td>999</td><td>597</td><td>402</td><td>70</td><td>526</td><td>466</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html" target="_blank">Pedestrian detection: A benchmark</a></td><td><span class="gray">[pdf]</span></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</span></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</span></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</span></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United
States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html" target="_blank">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>386</td><td>263</td><td>123</td><td>23</td><td>204</td><td>180</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html" target="_blank">Person Re-identification in the Wild</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>77</td><td>52</td><td>25</td><td>1</td><td>47</td><td>27</td></tr><tr><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/a0cc5f73a37723a6dd465924143f1cb4976d0169.html" target="_blank">Person Transfer GAN to Bridge Domain Gap for Person Re-identification</a></td><td><span class="gray">[pdf]</span></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>92%</td><td>24</td><td>22</td><td>2</td><td>1</td><td>20</td><td>4</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html" target="_blank">Personalizing Human Video Pose Estimation</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Oxford University</td><td>United
Kingdom</td><td>51.75208490</td><td>-1.25166460</td><td>64%</td><td>36</td><td>23</td><td>13</td><td>2</td><td>30</td><td>8</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html" target="_blank">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><span class="gray">[pdf]</span></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>716</td><td>407</td><td>309</td><td>60</td><td>492</td><td>222</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html" target="_blank">Presentation and validation of the Radboud Faces Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>487</td><td>234</td><td>253</td><td>39</td><td>342</td><td>144</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html" target="_blank">Pruning training sets for learning of object categories</a></td><td><span class="gray">[pdf]</span></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>63</td><td>44</td><td>19</td><td>4</td><td>42</td><td>20</td></tr><tr><td>3531332efe19be21e7401ba1f04570a142617236</td><td>ufdd</td><td>UFDD</td><td><a href="papers/3531332efe19be21e7401ba1f04570a142617236.html" target="_blank">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>1</td><td>4</td><td>0</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html" target="_blank">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>237</td><td>148</td><td>89</td><td>16</td><td>159</td><td>76</td></tr><tr><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td><td>megaage</td><td>MegaAge</td><td><a href="papers/c72a2ea819df9b0e8cd267eebcc6528b8741e03d.html" target="_blank">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html" target="_blank">Re-identification of pedestrians with variable occlusion and scale</a></td><td><span class="gray">[pdf]</span></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>Kingston University</td><td>United
Kingdom</td><td>51.42930860</td><td>-0.26840440</td><td>56%</td><td>9</td><td>5</td><td>4</td><td>1</td><td>5</td><td>4</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html" target="_blank">Re-identify people in wide area camera network</a></td><td><span class="gray">[pdf]</span></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Udine</td><td>Italy</td><td>46.08107230</td><td>13.21194740</td><td>60%</td><td>60</td><td>36</td><td>24</td><td>1</td><td>38</td><td>21</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html" target="_blank">ReSEED: social event dEtection dataset</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>1</td><td>5</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html" target="_blank">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html" target="_blank">Recognition using visual phrases</a></td><td><span class="gray">[pdf]</span></td><td>CVPR 2011</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United States</td><td>40.11116745</td><td>-88.22587665</td><td>57%</td><td>246</td><td>140</td><td>106</td><td>18</td><td>170</td><td>68</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html" target="_blank">Recognize complex events from static images by fusing deep channels</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>44</td><td>31</td><td>13</td><td>1</td><td>29</td><td>15</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html" target="_blank">Recognizing disguised faces</a></td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>18</td><td>10</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html" target="_blank">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf"
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>511</td><td>281</td><td>230</td><td>50</td><td>329</td><td>182</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html" target="_blank">Robust Face Landmark Estimation under Occlusion</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>60%</td><td>325</td><td>194</td><td>131</td><td>18</td><td>194</td><td>133</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html" target="_blank">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><span class="gray">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>Canada</td><td>45.50397610</td><td>-73.57496870</td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>13</td><td>7</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html" target="_blank">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>2</td><td>21</td><td>12</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html" target="_blank">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>19</td><td>9</td><td>10</td><td>2</td><td>6</td><td>13</td></tr><tr><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td><td>scface</td><td>SCface</td><td><a href="papers/29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d.html" target="_blank">SCface – surveillance cameras face database</a></td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td>Multimedia Tools and Applications</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>179</td><td>103</td><td>76</td><td>15</td><td>88</td><td>89</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html" target="_blank">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html" target="_blank">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td>edu</td><td>Brown University</td><td>United States</td><td>41.82686820</td><td>-71.40123146</td><td>60%</td><td>264</td><td>158</td><td>106</td><td>27</td><td>206</td><td>56</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html" target="_blank">Scalable Person Re-identification: A Benchmark</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>77%</td><td>460</td><td>354</td><td>106</td><td>9</td><td>263</td><td>185</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</span></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</span></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html" target="_blank">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>52</td><td>35</td><td>17</td><td>1</td><td>46</td><td>6</td></tr><tr><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/570f37ed63142312e6ccdf00ecc376341ec72b9f.html" target="_blank">Social LSTM: Human Trajectory Prediction in Crowded Spaces</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>224</td><td>127</td><td>97</td><td>3</td><td>140</td><td>81</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html" target="_blank">Spoofing faces using makeup: An investigative study</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>France</td><td>43.61581310</td><td>7.06838000</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>0</td><td>1</td><td>5</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html" target="_blank">Sports Videos in the Wild (SVW): A video dataset for
sports analysis</a></td><td><span class="gray">[pdf]</span></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>86%</td><td>7</td><td>6</td><td>1</td><td>1</td><td>5</td><td>2</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>oxford_town_centre</td><td>TownCentre</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html" target="_blank">Stable multi-target tracking in real-time surveillance video</a></td><td><span class="gray">[pdf]</span></td><td>CVPR 2011</td><td>edu</td><td>University of Oxford</td><td>United Kingdom</td><td>51.75345380</td><td>-1.25400997</td><td>67%</td><td>328</td><td>221</td><td>107</td><td>13</td><td>186</td><td>140</td></tr><tr><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/2306b2a8fba28539306052764a77a0d0f5d1236a.html" target="_blank">Surveillance Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html" target="_blank">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html" target="_blank">Texas 3D Face Recognition Database</a></td><td><span class="gray">[pdf]</span></td><td>2010 IEEE Southwest Symposium on Image Analysis &amp; Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>66</td><td>40</td><td>26</td><td>3</td><td>40</td><td>27</td></tr><tr><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/6d96f946aaabc734af7fe3fc4454cf8547fcd5ed.html" target="_blank">The AR face database</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>999</td><td>573</td><td>426</td><td>59</td><td>458</td><td>530</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html" target="_blank">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>429</td><td>253</td><td>176</td><td>38</td><td>198</td><td>234</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html" target="_blank">The CMU Face In Action (FIA) Database</a></td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf"
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>54</td><td>29</td><td>25</td><td>5</td><td>40</td><td>16</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>446</td><td>313</td><td>50</td><td>404</td><td>345</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>446</td><td>313</td><td>50</td><td>404</td><td>345</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html" target="_blank">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>99</td><td>59</td><td>40</td><td>1</td><td>73</td><td>21</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>#N/A</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html" target="_blank">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>37</td><td>20</td><td>17</td><td>3</td><td>30</td><td>7</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html" target="_blank">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>21</td><td>10</td><td>11</td><td>2</td><td>18</td><td>3</td></tr><tr><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td><td>imdb_face</td><td>IMDb Face</td><td><a href="papers/9e31e77f9543ab42474ba4e9330676e18c242e72.html" target="_blank">The Devil of Face Recognition is in the Noise</a></td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Nanyang Technological University</td><td>Singapore</td><td>1.34841040</td><td>103.68297965</td><td>33%</td><td>6</td><td>2</td><td>4</td><td>0</td><td>4</td><td>1</td></tr><tr><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td><td>umd_faces</td><td>UMD</td><td><a href="papers/71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6.html" target="_blank">The Do’s and Don’ts for CNN-Based Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision 
Workshops (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>26</td><td>16</td><td>10</td><td>2</td><td>16</td><td>8</td></tr><tr><td>72a155c987816ae81c858fddbd6beab656d86220</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/72a155c987816ae81c858fddbd6beab656d86220.html" target="_blank">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html" target="_blank">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><span class="gray">[pdf]</span></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td>edu</td><td>University of Pittsburgh</td><td>United States</td><td>40.44415295</td><td>-79.96243993</td><td>60%</td><td>999</td><td>604</td><td>395</td><td>58</td><td>470</td><td>518</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html" target="_blank">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>3</td><td>18</td><td>9</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html" target="_blank">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>City University of New York</td><td>United States</td><td>40.87228250</td><td>-73.89489171</td><td>51%</td><td>115</td><td>59</td><td>56</td><td>8</td><td>75</td><td>37</td></tr><tr><td>dc8b25e35a3acb812beb499844734081722319b4</td><td>feret</td><td>FERET</td><td><a href="papers/dc8b25e35a3acb812beb499844734081722319b4.html" target="_blank">The FERET database and evaluation procedure for face-recognition algorithms</a></td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>516</td><td>483</td><td>103</td><td>591</td><td>421</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html" target="_blank">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>16</td><td>8</td><td>8</td><td>1</td><td>10</td><td>6</td></tr><tr><td>8be57cdad86fdf8c8290df4ca3149592f3c46dd3</td><td>m2vts</td><td>m2vts</td><td><a href="papers/8be57cdad86fdf8c8290df4ca3149592f3c46dd3.html" target="_blank">The
M2VTS Multimodal Face Database (Release 1.00)</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>73</td><td>33</td><td>40</td><td>2</td><td>39</td><td>33</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td><td>mr2</td><td>MR2</td><td><a href="papers/578d4ad74818086bb64f182f72e2c8bd31e3d426.html" target="_blank">The MR2: A multi-racial, mega-resolution database of facial stimuli.</a></td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>7</td><td>0</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html" target="_blank">The MUG facial expression database</a></td><td><span class="gray">[pdf]</span></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>Greece</td><td>40.62984145</td><td>22.95889350</td><td>55%</td><td>82</td><td>45</td><td>37</td><td>4</td><td>34</td><td>47</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html" target="_blank">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>61</td><td>37</td><td>24</td><td>0</td><td>43</td><td>16</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html" target="_blank">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>139</td><td>105</td><td>34</td><td>5</td><td>100</td><td>37</td></tr><tr><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td><td>voc</td><td>VOC</td><td><a href="papers/0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a.html" target="_blank">The Pascal Visual
Object Classes (VOC) Challenge</a></td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>60%</td><td>999</td><td>596</td><td>402</td><td>30</td><td>557</td><td>422</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html" target="_blank">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>116</td><td>70</td><td>46</td><td>14</td><td>84</td><td>31</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html" target="_blank">The intrinsic memorability of face photographs.</a></td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td>Journal of experimental psychology. General</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>52</td><td>28</td><td>24</td><td>2</td><td>36</td><td>14</td></tr><tr><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/d178cde92ab3dc0dd2ebee5a76a33d556c39448b.html" target="_blank">The jiku mobile video dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>National University of Singapore</td><td>Singapore</td><td>1.29620180</td><td>103.77689944</td><td>71%</td><td>24</td><td>17</td><td>7</td><td>0</td><td>6</td><td>19</td></tr><tr><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td><td>put_face</td><td>Put Face</td><td><a href="papers/ae0aee03d946efffdc7af2362a42d3750e7dd48a.html" target="_blank">The put face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>99</td><td>50</td><td>49</td><td>7</td><td>55</td><td>48</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html" target="_blank">Three-dimensional face recognition: an eigensurface approach</a></td><td><span class="gray">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. 
ICIP '04.</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>38</td><td>16</td><td>22</td><td>4</td><td>24</td><td>13</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html" target="_blank">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>63%</td><td>84</td><td>53</td><td>31</td><td>4</td><td>51</td><td>33</td></tr><tr><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/64e0690dd176a93de9d4328f6e31fc4afe1e7536.html" target="_blank">Tracking Multiple People Online and in Real Time</a></td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>23</td><td>18</td><td>5</td><td>1</td><td>12</td><td>10</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html" target="_blank">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>0</td><td>18</td><td>16</td></tr><tr><td>4eab317b5ac436a949849ed286baa3de2a541eef</td><td>laofiw</td><td>LAOFIW</td><td><a href="papers/4eab317b5ac436a949849ed286baa3de2a541eef.html" target="_blank">Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</a></td><td><a href="https://arxiv.org/pdf/1809.02169.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>2</td><td>0</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html" target="_blank">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>999</td><td>643</td><td>356</td><td>56</td><td>628</td><td>362</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html" target="_blank">UMB-DB: A database of partially occluded 3D faces</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>47</td><td>31</td><td>16</td><td>2</td><td>22</td><td>24</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html" target="_blank">UMDFaces: An annotated face dataset for training deep networks</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td>edu</td><td>University of Maryland</td><td>United 
States</td><td>39.28996850</td><td>-76.62196103</td><td>76%</td><td>42</td><td>32</td><td>10</td><td>2</td><td>30</td><td>11</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html" target="_blank">USED: a large-scale social event detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Trento</td><td>Italy</td><td>46.06588360</td><td>11.11598940</td><td>71%</td><td>7</td><td>5</td><td>2</td><td>0</td><td>3</td><td>4</td></tr><tr><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td><td>uccs</td><td>UCCS</td><td><a href="papers/d4f1eb008eb80595bcfdac368e23ae9754e1e745.html" target="_blank">Unconstrained Face Detection and Open-Set Face Recognition Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>4</td><td>1</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>ufi</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html" target="_blank">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>12</td><td>6</td><td>6</td><td>0</td><td>4</td><td>6</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html" target="_blank">Understanding Kin Relationships in a Photo</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>94</td><td>59</td><td>35</td><td>1</td><td>33</td><td>61</td></tr><tr><td>5a4df9bef1872865f0b619ac3aacc97f49e4a035</td><td>cuhk_train_station</td><td>CUHK Train Station Dataset</td><td><a href="papers/5a4df9bef1872865f0b619ac3aacc97f49e4a035.html" target="_blank">Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>58%</td><td>141</td><td>82</td><td>59</td><td>5</td><td>60</td><td>75</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html" target="_blank">Understanding images of groups of people</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/15e1af79939dbf90790b03d8aa02477783fb1d0f.html" target="_blank">Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</a></td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html" target="_blank">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td>edu</td><td>University of Notre Dame</td><td>United States</td><td>41.70456775</td><td>-86.23822026</td><td>63%</td><td>35</td><td>22</td><td>13</td><td>3</td><td>18</td><td>15</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html" target="_blank">VADANA: A dense dataset for facial image analysis</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>University of Delaware</td><td>United States</td><td>39.68103280</td><td>-75.75401840</td><td>67%</td><td>15</td><td>10</td><td>5</td><td>0</td><td>5</td><td>10</td></tr><tr><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/70c59dc3470ae867016f6ab0e008ac8ba03774a1.html" target="_blank">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>83</td><td>63</td><td>20</td><td>3</td><td>61</td><td>20</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html" target="_blank">VQA: Visual Question Answering</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>erce</td><td>ERCe</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html" target="_blank">Violent flows: Real-time detection of violent crowd behavior</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition 
Workshops</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>65%</td><td>88</td><td>57</td><td>31</td><td>6</td><td>45</td><td>44</td></tr><tr><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td><td>kitti</td><td>KITTI</td><td><a href="papers/026e3363b7f76b51cc711886597a44d5f1fd1de2.html" target="_blank">Vision meets robotics: The KITTI dataset</a></td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td>I. J. Robotics Res.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>999</td><td>602</td><td>397</td><td>36</td><td>553</td><td>462</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html" target="_blank">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>168</td><td>90</td><td>78</td><td>10</td><td>85</td><td>79</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html" target="_blank">Visual Kinship Recognition of Families in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>University of Massachusetts Dartmouth</td><td>United States</td><td>41.62772475</td><td>-71.00724501</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>2</td><td>3</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html" target="_blank">WIDER FACE: A Face Detection Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>65%</td><td>178</td><td>116</td><td>62</td><td>12</td><td>112</td><td>66</td></tr><tr><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/36bccfb2ad847096bc76777e544f305813cd8f5b.html" target="_blank">WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td>WLFDB</td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html" target="_blank">WLFDB : Weakly Labeled Face Databases</a></td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html" target="_blank">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a 
href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>78</td><td>49</td><td>29</td><td>6</td><td>54</td><td>23</td></tr><tr><td>0c91808994a250d7be332400a534a9291ca3b60e</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/0c91808994a250d7be332400a534a9291ca3b60e.html" target="_blank">Weak Hypotheses and Boosting for Generic Object Detection and Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>236</td><td>131</td><td>105</td><td>17</td><td>161</td><td>77</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html" target="_blank">Web-based database for facial expression analysis</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>464</td><td>250</td><td>214</td><td>45</td><td>282</td><td>188</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html" target="_blank">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Kentucky</td><td>United States</td><td>38.03337420</td><td>-84.50177580</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/b62628ac06bbac998a3ab825324a41a11bc3a988.html" target="_blank">XM2VTSDB : The extended M2VTS database</a></td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>864</td><td>527</td><td>337</td><td>39</td><td>493</td><td>404</td></tr><tr><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/010f0f4929e6a6644fb01f0e43820f91d0fad292.html" target="_blank">YFCC100M: the new data in multimedia research</a></td><td><span class="gray">[pdf]</a></td><td>Commun. ACM</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>64%</td><td>274</td><td>174</td><td>100</td><td>23</td><td>172</td><td>100</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html" target="_blank">YawDD: a yawning detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>15</td><td>12</td><td>3</td><td>1</td><td>2</td><td>13</td></tr></table></body></html> \ No newline at end of file
+<!doctype html><html><head><meta charset='utf-8'><title>All Papers</title><link rel='stylesheet' href='reports.css'></head><body><h2>All Papers</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Country</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html" target="_blank">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>387</td><td>249</td><td>138</td><td>21</td><td>291</td><td>96</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html" target="_blank">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>129</td><td>77</td><td>52</td><td>9</td><td>74</td><td>55</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html" target="_blank">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>323</td><td>217</td><td>106</td><td>25</td><td>208</td><td>120</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html" target="_blank">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>133</td><td>82</td><td>51</td><td>9</td><td>73</td><td>58</td></tr><tr><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43.html" target="_blank">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html" target="_blank">80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>999</td><td>574</td><td>425</td><td>89</td><td>644</td><td>337</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a 
href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html" target="_blank">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>2</td><td>26</td><td>15</td></tr><tr><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461.html" target="_blank">A 3D Dynamic Database for Unconstrained Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html" target="_blank">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>343</td><td>190</td><td>153</td><td>25</td><td>223</td><td>114</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html" target="_blank">A 3D facial expression database for facial behavior research</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>588</td><td>313</td><td>274</td><td>45</td><td>306</td><td>282</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html" target="_blank">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><span class="gray">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>65</td><td>38</td><td>27</td><td>6</td><td>45</td><td>20</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html" target="_blank">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>50</td><td>26</td><td>24</td><td>5</td><td>31</td><td>18</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html" target="_blank">A Multi-modal Graphical Model for Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>0</td><td>5</td><td>3</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html" target="_blank">A Richly Annotated Dataset for Pedestrian 
Attribute Recognition</a></td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>0</td><td>16</td><td>10</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html" target="_blank">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>184</td><td>118</td><td>66</td><td>12</td><td>120</td><td>67</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html" target="_blank">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>95</td><td>63</td><td>32</td><td>6</td><td>50</td><td>45</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html" target="_blank">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>20</td><td>9</td><td>11</td><td>0</td><td>9</td><td>11</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html" target="_blank">A data-driven approach to cleaning large face datasets</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>138</td><td>89</td><td>49</td><td>1</td><td>95</td><td>41</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html" target="_blank">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>United States</td><td>42.08779975</td><td>-75.97066066</td><td>49%</td><td>154</td><td>76</td><td>78</td><td>7</td><td>80</td><td>75</td></tr><tr><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td><td>fei</td><td>FEI</td><td><a href="papers/8b56e33f33e582f3e473dba573a16b598ed9bcdc.html" target="_blank">A new ranking method for principal components analysis and its application to face image analysis</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>169</td><td>93</td><td>76</td><td>6</td><td>69</td><td>102</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html" target="_blank">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a 
href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>18</td><td>9</td><td>9</td><td>0</td><td>12</td><td>5</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html" target="_blank">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>554</td><td>445</td><td>94</td><td>495</td><td>491</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html" target="_blank">Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>47</td><td>24</td><td>23</td><td>1</td><td>23</td><td>24</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/758d7e1be64cc668c59ef33ba8882c8597406e53.html" target="_blank">AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</a></td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>37</td><td>23</td><td>14</td><td>0</td><td>25</td><td>11</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html" target="_blank">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>83</td><td>38</td><td>45</td><td>6</td><td>43</td><td>39</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html" target="_blank">Age and Gender Estimation of Unfiltered Faces</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>80%</td><td>179</td><td>143</td><td>36</td><td>1</td><td>98</td><td>80</td></tr><tr><td>d818568838433a6d6831adde49a58cef05e0c89f</td><td>agedb</td><td>AgeDB</td><td><a href="papers/d818568838433a6d6831adde49a58cef05e0c89f.html" target="_blank">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Imperial College London</td><td>United Kingdom</td><td>51.49887085</td><td>-0.17560797</td><td>94%</td><td>18</td><td>17</td><td>1</td><td>0</td><td>14</td><td>3</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html" target="_blank">Annotated Facial Landmarks in the Wild: A 
large-scale, real-world database for facial landmark localization</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>61%</td><td>318</td><td>194</td><td>124</td><td>33</td><td>211</td><td>107</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html" target="_blank">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>91</td><td>57</td><td>34</td><td>5</td><td>60</td><td>31</td></tr><tr><td>633c851ebf625ad7abdda2324e9de093cf623141</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/633c851ebf625ad7abdda2324e9de093cf623141.html" target="_blank">Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</a></td><td><span class="gray">[pdf]</a></td><td>2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>10</td><td>7</td><td>3</td><td>0</td><td>8</td><td>3</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html" target="_blank">Appearance-based gaze estimation in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>149</td><td>107</td><td>42</td><td>3</td><td>94</td><td>54</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html" target="_blank">Attribute and simile classifiers for face verification</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>914</td><td>578</td><td>336</td><td>47</td><td>586</td><td>316</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html" target="_blank">Automated Human Identification Using Ear Imaging</a></td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>80</td><td>39</td><td>41</td><td>6</td><td>35</td><td>44</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html" target="_blank">Automatic 3D face authentication</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>100</td><td>54</td><td>46</td><td>8</td><td>63</td><td>36</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html" target="_blank">Automatic and Efficient Human Pose 
Estimation for Sign Language Videos</a></td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>1</td><td>16</td><td>11</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html" target="_blank">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>91%</td><td>54</td><td>49</td><td>4</td><td>1</td><td>41</td><td>12</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html" target="_blank">Bosphorus Database for 3D Face Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>352</td><td>196</td><td>156</td><td>17</td><td>162</td><td>188</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United 
States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html" target="_blank">ChaLearn looking at people: A review of events and resources</a></td><td><span class="gray">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>13</td><td>9</td><td>4</td><td>1</td><td>8</td><td>4</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html" target="_blank">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>60</td><td>42</td><td>18</td><td>0</td><td>34</td><td>28</td></tr><tr><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/22ad2c8c0f4d6aa4328b38d894b814ec22579761.html" target="_blank">Clothing cosegmentation for recognizing people</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>65%</td><td>178</td><td>116</td><td>62</td><td>7</td><td>100</td><td>86</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>285</td><td>182</td><td>103</td><td>13</td><td>197</td><td>93</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>285</td><td>182</td><td>103</td><td>13</td><td>197</td><td>93</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html" target="_blank">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>899</td><td>508</td><td>391</td><td>51</td><td>431</td><td>451</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html" target="_blank">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><span class="gray">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National 
University</td><td>Australia</td><td>-35.27769990</td><td>149.11852700</td><td>64%</td><td>181</td><td>115</td><td>66</td><td>8</td><td>87</td><td>97</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html" target="_blank">Competitive affective gaming: winning with a smile</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>Portugal</td><td>38.66096400</td><td>-9.20581300</td><td>78%</td><td>9</td><td>7</td><td>2</td><td>0</td><td>5</td><td>4</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html" target="_blank">Component-Based Face Recognition with 3D Morphable Models</a></td><td><span class="gray">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html" target="_blank">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>552</td><td>447</td><td>70</td><td>540</td><td>439</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html" target="_blank">Consistent Re-identification in a Camera Network</a></td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>49</td><td>35</td><td>14</td><td>3</td><td>34</td><td>13</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>33</td><td>20</td><td>13</td><td>1</td><td>23</td><td>11</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>33</td><td>20</td><td>13</td><td>1</td><td>23</td><td>11</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html" target="_blank">Crowdsourcing facial expressions for affective-interaction</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image 
Understanding</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html" target="_blank">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>122</td><td>92</td><td>30</td><td>4</td><td>75</td><td>48</td></tr><tr><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td><td>disfa</td><td>DISFA</td><td><a href="papers/5a5f0287484f0d480fed1ce585dbf729586f0edc.html" target="_blank">DISFA: A Spontaneous Facial Action Intensity Database</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Affective Computing</td><td>edu</td><td>University of Denver</td><td>United States</td><td>39.67665410</td><td>-104.96220300</td><td>52%</td><td>184</td><td>95</td><td>89</td><td>19</td><td>96</td><td>89</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html" target="_blank">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>69%</td><td>145</td><td>100</td><td>45</td><td>9</td><td>93</td><td>51</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html" target="_blank">Deep Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>999</td><td>640</td><td>359</td><td>49</td><td>558</td><td>429</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html" target="_blank">Deep Learning Face Attributes in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>56%</td><td>919</td><td>519</td><td>399</td><td>61</td><td>694</td><td>201</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html" target="_blank">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>176</td><td>112</td><td>64</td><td>2</td><td>113</td><td>62</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html" target="_blank">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE 
Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>568</td><td>412</td><td>156</td><td>19</td><td>320</td><td>235</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html" target="_blank">Depth and Appearance for Mobile Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>62%</td><td>324</td><td>202</td><td>122</td><td>26</td><td>193</td><td>127</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html" target="_blank">Describing Common Human Visual Actions in Images</a></td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>25</td><td>17</td><td>8</td><td>0</td><td>23</td><td>2</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>bpad</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html" target="_blank">Describing people: A poselet-based approach to attribute classification</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>230</td><td>138</td><td>92</td><td>14</td><td>163</td><td>66</td></tr><tr><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td><td>scut_head</td><td>SCUT HEAD</td><td><a href="papers/d3200d49a19a4a4e4e9745ee39649b65d80c834b.html" target="_blank">Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</a></td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td>2018 24th International Conference on Pattern Recognition (ICPR)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td><td>mafa</td><td>MAsked FAces</td><td><a href="papers/9cc8cf0c7d7fa7607659921b6ff657e17e135ecc.html" target="_blank">Detecting Masked Faces in the Wild with LLE-CNNs</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>5</td><td>3</td><td>2</td><td>1</td><td>4</td><td>1</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html" target="_blank">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>6</td><td>3</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html" target="_blank">Ear Recognition: More Than a Survey</a></td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" 
target="_blank">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>26</td><td>19</td><td>7</td><td>0</td><td>10</td><td>16</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw</td><td>LFW</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html" target="_blank">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>183</td><td>109</td><td>74</td><td>13</td><td>103</td><td>77</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html" target="_blank">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>86</td><td>45</td><td>41</td><td>7</td><td>54</td><td>29</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html" target="_blank">End-to-End Deep Learning for Person Search</a></td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>46</td><td>32</td><td>14</td><td>0</td><td>27</td><td>16</td></tr><tr><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/1bd1645a629f1b612960ab9bba276afd4cf7c666.html" target="_blank">End-to-End People Detection in Crowded Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Stanford University</td><td>United States</td><td>37.43131385</td><td>-122.16936535</td><td>55%</td><td>42</td><td>23</td><td>19</td><td>1</td><td>19</td><td>19</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html" target="_blank">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of California, Santa Cruz</td><td>United States</td><td>36.99158470</td><td>-122.05827710</td><td>67%</td><td>624</td><td>415</td><td>209</td><td>33</td><td>342</td><td>276</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html" target="_blank">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>632</td><td>366</td><td>264</td><td>44</td><td>358</td><td>264</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html" target="_blank">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>14</td><td>5</td><td>9</td><td>0</td><td>12</td><td>1</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html" target="_blank">Exploring Models and Data for Image Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>206</td><td>126</td><td>80</td><td>11</td><td>162</td><td>39</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html" target="_blank">FDDB: A benchmark for face detection in unconstrained settings</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>380</td><td>240</td><td>140</td><td>19</td><td>202</td><td>164</td></tr><tr><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td><td>feret</td><td>FERET</td><td><a href="papers/31de9b3dd6106ce6eec9a35991b2b9083395fd0b.html" target="_blank">FERET (Face Recognition Technology) Recognition Algorithm Development and Test Results</a></td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>75</td><td>39</td><td>36</td><td>5</td><td>54</td><td>20</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html" target="_blank">Face detection, pose estimation, and landmark localization in the wild</a></td><td><span class="gray">[pdf]</span></td><td>2012 IEEE Conference on Computer Vision and Pattern
Recognition</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>999</td><td>656</td><td>343</td><td>43</td><td>576</td><td>422</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html" target="_blank">Face recognition in unconstrained videos with matched background similarity</a></td><td><span class="gray">[pdf]</span></td><td>CVPR 2011</td><td>edu</td><td>Tel Aviv University</td><td>Israel</td><td>32.11198890</td><td>34.80459702</td><td>67%</td><td>509</td><td>340</td><td>168</td><td>24</td><td>294</td><td>216</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html" target="_blank">Face swapping: automatically replacing faces in photographs</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6204776d31359d129a582057c2d788a14f8aadeb</td><td>youtube_celebrities</td><td>YouTube Celebrities</td><td><a href="papers/6204776d31359d129a582057c2d788a14f8aadeb.html" target="_blank">Face tracking and recognition with visual constraints in real-world videos</a></td><td><span class="gray">[pdf]</span></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Rutgers University</td><td>United States</td><td>40.47913175</td><td>-74.43168868</td><td>56%</td><td>267</td><td>150</td><td>116</td><td>12</td><td>125</td><td>121</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html" target="_blank">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>225</td><td>140</td><td>85</td><td>17</td><td>146</td><td>77</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html" target="_blank">Faces in Places: compound query retrieval</a></td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>3</td><td>2</td></tr><tr><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td><td>ibm_dif</td><td>IBM Diversity in Faces</td><td><a href="papers/0ab7cff2ccda7269b73ff6efd9d37e1318f7db25.html" target="_blank">Facial Coding Scheme Reference 1 Craniofacial Distances</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf"
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>407</td><td>262</td><td>145</td><td>18</td><td>252</td><td>153</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>407</td><td>262</td><td>145</td><td>18</td><td>252</td><td>153</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html" target="_blank">Fashion Landmark Detection in the Wild</a></td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>26</td><td>19</td><td>7</td><td>1</td><td>16</td><td>10</td></tr><tr><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td><td>caltech_crp</td><td>Caltech CRP</td><td><a href="papers/060820f110a72cbf02c14a6d1085bd6e1d994f6a.html" target="_blank">Fine-grained classification of pedestrians in video: Benchmark and state of the art</a></td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>17</td><td>8</td><td>9</td><td>0</td><td>9</td><td>8</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html" target="_blank">Fine-grained evaluation on face detection in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>12</td><td>5</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html" target="_blank">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>148</td><td>99</td><td>49</td><td>7</td><td>105</td><td>43</td></tr><tr><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/7b92d1e53cc87f7a4256695de590098a2f30261e.html" target="_blank">From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html" target="_blank">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>1</td><td>12</td><td>6</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html" target="_blank">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>11</td><td>6</td><td>5</td><td>0</td><td>5</td><td>4</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html" target="_blank">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>999</td><td>560</td><td>439</td><td>67</td><td>498</td><td>462</td></tr><tr><td>06f02199690961ba52997cde1527e714d2b3bf8f</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/06f02199690961ba52997cde1527e714d2b3bf8f.html" target="_blank">Gaze locking: passive eye contact detection for human-object interaction</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>76%</td><td>79</td><td>60</td><td>19</td><td>0</td><td>49</td><td>34</td></tr><tr><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/18858cc936947fc96b5c06bbe3c6c2faa5614540.html" target="_blank">Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</a></td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>59</td><td>31</td><td>28</td><td>0</td><td>47</td><td>10</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html" target="_blank">Genealogical face recognition based on UB KinFace database</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>United States</td><td>42.93362780</td><td>-78.88394479</td><td>55%</td><td>31</td><td>17</td><td>14</td><td>0</td><td>11</td><td>21</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html" target="_blank">Generic object recognition with boosting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>53%</td><td>293</td><td>155</td><td>138</td><td>16</td><td>195</td><td>97</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html" target="_blank">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html" target="_blank">HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</a></td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/a8d0b149c2eadaa02204d3e4356fbc8eccf3b315.html" target="_blank">Hi4D-ADSIP 3-D dynamic facial articulation database</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>1</td><td>4</td><td>11</td></tr><tr><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/2d45cfd838016a6e39f6b766ffe85acd649440c7.html" target="_blank">Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html" target="_blank">High Five: Recognising human interactions in TV shows</a></td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>98</td><td>56</td><td>42</td><td>10</td><td>66</td><td>28</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html" target="_blank">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>95</td><td>61</td><td>34</td><td>4</td><td>59</td><td>35</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html" target="_blank">Histograms of oriented gradients for human detection</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>INRIA Rhone-Alps, Montbonnot, France</td><td>France</td><td>45.21788600</td><td>5.80736900</td><td>57%</td><td>999</td><td>570</td><td>429</td><td>43</td><td>419</td><td>509</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html" target="_blank">How to Take a Good Selfie?</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>11</td><td>8</td><td>3</td><td>0</td><td>7</td><td>5</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html" target="_blank">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>18</td><td>13</td><td>5</td><td>0</td><td>14</td><td>4</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html" target="_blank">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>280</td><td>194</td><td>86</td><td>9</td><td>139</td><td>137</td></tr><tr><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td><td>pa_100k</td><td>PA-100K</td><td><a href="papers/f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44.html" target="_blank">HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>55</td><td>41</td><td>14</td><td>0</td><td>36</td><td>17</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html" target="_blank">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><span class="gray">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td></td><td>79%</td><td>14</td><td>11</td><td>3</td><td>0</td><td>12</td><td>1</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html" target="_blank">IARPA Janus Benchmark-B Face Dataset</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Michigan State University</td><td>United States</td><td>42.71856800</td><td>-84.47791571</td><td>63%</td><td>35</td><td>22</td><td>13</td><td>3</td><td>25</td><td>8</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html" target="_blank">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>35</td><td>20</td><td>15</td><td>2</td><td>21</td><td>14</td></tr><tr><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/7f23a4bb0c777dd72cca7665a5f370ac7980217e.html" target="_blank">Improving Person Re-identification by Attribute and Identity Learning</a></td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>84%</td><td>87</td><td>73</td><td>14</td><td>0</td><td>43</td><td>42</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html" target="_blank">Indian Face Age Database: A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html" target="_blank">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td>edu</td><td>BVBCET, Hubli, India</td><td>India</td><td>15.36883320</td><td>75.12137960</td><td>65%</td><td>17</td><td>11</td><td>6</td><td>0</td><td>11</td><td>5</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html" target="_blank">Interactive Facial Feature Localization</a></td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Adobe</td><td>United States</td><td>37.33077030</td><td>-121.89409510</td><td>62%</td><td>352</td><td>219</td><td>133</td><td>23</td><td>212</td><td>146</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html" target="_blank">Investigating Open-World Person Re-identification Using a Drone</a></td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>5</td><td>2</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html" target="_blank">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>United States</td><td>34.22498270</td><td>-77.86907744</td><td>77%</td><td>13</td><td>10</td><td>3</td><td>0</td><td>6</td><td>8</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html" target="_blank">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html" target="_blank">Iranian 
Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</span></td><td>2007 International Conference on Machine Vision</td><td>edu</td><td>Islamic Azad University</td><td>Iran</td><td>34.84529990</td><td>48.55962120</td><td>48%</td><td>23</td><td>11</td><td>12</td><td>2</td><td>14</td><td>9</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html" target="_blank">Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</a></td><td><span class="gray">[pdf]</span></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>1</td><td>3</td><td>5</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html" target="_blank">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td>edu</td><td>University of North Carolina at Chapel Hill</td><td>United States</td><td>35.91139710</td><td>-79.05045290</td><td>62%</td><td>82</td><td>51</td><td>31</td><td>6</td><td>28</td><td>52</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html" target="_blank">Kinship Verification through Transfer Learning</a></td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>71</td><td>41</td><td>30</td><td>2</td><td>29</td><td>42</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html" target="_blank">Labeled Faces in the Wild: Updates and New Reporting Procedures</a></td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Massachusetts</td><td>United States</td><td>42.38897850</td><td>-72.52869870</td><td>68%</td><td>123</td><td>84</td><td>39</td><td>3</td><td>71</td><td>51</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html" target="_blank">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>999</td><td>622</td><td>377</td><td>61</td><td>598</td><td>382</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html" target="_blank">Labeled Faces in the Wild: A Survey</a></td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Stevens Institute of Technology</td><td>United
States</td><td>40.74225200</td><td>-74.02709490</td><td>62%</td><td>109</td><td>68</td><td>41</td><td>8</td><td>66</td><td>43</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html" target="_blank">Large age-gap face verification by feature injection in deep networks</a></td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>3</td><td>4</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html" target="_blank">Large scale unconstrained open set face database</a></td><td><span class="gray">[pdf]</span></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>company</td><td>Securics Inc., Colorado Springs, CO</td><td>United States</td><td>38.83388160</td><td>-104.82136340</td><td>83%</td><td>6</td><td>5</td><td>1</td><td>0</td><td>4</td><td>2</td></tr><tr><td>4af89578ac237278be310f7660a408b03f12d603</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/4af89578ac237278be310f7660a408b03f12d603.html" target="_blank">Large-scale geo-facial image analysis</a></td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>6</td><td>6</td><td>0</td><td>0</td><td>4</td><td>2</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>108</td><td>63</td><td>45</td><td>11</td><td>66</td><td>44</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>108</td><td>63</td><td>45</td><td>11</td><td>66</td><td>44</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html" target="_blank">Learning Face Representation from Scratch</a></td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese Academy of Sciences</td><td>China</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>476</td><td>337</td><td>139</td><td>19</td><td>290</td><td>182</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html" target="_blank">Learning Social Relation Traits from Face Images</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE International Conference on Computer Vision
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>23</td><td>14</td><td>9</td><td>4</td><td>16</td><td>7</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html" target="_blank">Learning effective human pose estimation from inaccurate annotation</a></td><td><span class="gray">[pdf]</span></td><td>CVPR 2011</td><td>edu</td><td>University of Leeds</td><td>United Kingdom</td><td>53.80387185</td><td>-1.55245712</td><td>69%</td><td>169</td><td>117</td><td>52</td><td>8</td><td>108</td><td>65</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>erce</td><td>ERCe</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td><td>mot</td><td>MOT</td><td><a href="papers/5981e6479c3fd4e31644db35d236bfb84ae46514.html" target="_blank">Learning to associate: HybridBoosted multi-target tracker for crowded scene</a></td><td><span class="gray">[pdf]</span></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of Southern California</td><td>United States</td><td>34.02241490</td><td>-118.28634407</td><td>61%</td><td>326</td><td>200</td><td>125</td><td>22</td><td>190</td><td>137</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>369</td><td>225</td><td>143</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>369</td><td>225</td><td>143</td><td>32</td><td>237</td><td>131</td></tr>
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>369</td><td>225</td><td>143</td><td>32</td><td>237</td><td>131</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html" target="_blank">Level Playing Field for Million Scale Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>United States</td><td>47.65432380</td><td>-122.30800894</td><td>72%</td><td>39</td><td>28</td><td>11</td><td>2</td><td>29</td><td>9</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html" target="_blank">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>197</td><td>133</td><td>64</td><td>15</td><td>108</td><td>88</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfpw</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html" target="_blank">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html" target="_blank">Locally Aligned Feature Transforms across Views</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>258</td><td>164</td><td>94</td><td>15</td><td>136</td><td>117</td></tr><tr><td>8990cdce3f917dad622e43e033db686b354d057c</td><td>tiny_faces</td><td>TinyFace</td><td><a href="papers/8990cdce3f917dad622e43e033db686b354d057c.html" target="_blank">Low-Resolution Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html" target="_blank">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>168</td><td>115</td><td>53</td><td>4</td><td>97</td><td>69</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>North Carolina University</td><td>United 
States</td><td>34.22398690</td><td>-77.87013250</td><td>57%</td><td>437</td><td>251</td><td>185</td><td>22</td><td>228</td><td>203</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</span></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>North Carolina University</td><td>United States</td><td>34.22398690</td><td>-77.87013250</td><td>57%</td><td>437</td><td>251</td><td>185</td><td>22</td><td>228</td><td>203</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html" target="_blank">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>78%</td><td>180</td><td>141</td><td>39</td><td>8</td><td>120</td><td>59</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html" target="_blank">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>67</td><td>36</td><td>31</td><td>4</td><td>29</td><td>28</td></tr><tr><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td><td>gfw</td><td>Grouping Face in the Wild</td><td><a href="papers/e58dd160a76349d46f881bd6ddbc2921f08d1050.html" target="_blank">Merge or Not?
Learning to Group Faces via Imitation Learning</a></td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>2</td><td>0</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>fpoq</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html" target="_blank">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>81%</td><td>16</td><td>13</td><td>3</td><td>0</td><td>13</td><td>4</td></tr><tr><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td><td>coco</td><td>COCO</td><td><a href="papers/5e0f8c355a37a5a89351c02f174e7a5ddcb98683.html" target="_blank">Microsoft COCO: Common Objects in Context</a></td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>606</td><td>393</td><td>25</td><td>722</td><td>259</td></tr><tr><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/41976ebc8ab76d9a6861487c97cc7fcbe3b6015f.html" target="_blank">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>29</td><td>20</td><td>9</td><td>2</td><td>27</td><td>2</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</span></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>59%</td><td>311</td><td>185</td><td>126</td><td>34</td><td>208</td><td>105</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</span></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>59%</td><td>311</td><td>185</td><td>126</td><td>34</td><td>208</td><td>105</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html" target="_blank">Multi-camera activity correlation analysis</a></td><td><span class="gray">[pdf]</span></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>69%</td><td>142</td><td>98</td><td>44</td><td>7</td><td>77</td><td>64</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span
class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html" target="_blank">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>148</td><td>100</td><td>48</td><td>5</td><td>80</td><td>65</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html" target="_blank">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>26</td><td>16</td><td>10</td><td>2</td><td>21</td><td>6</td></tr><tr><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2b926b3586399d028b46315d7d9fb9d879e4f79c.html" target="_blank">Multimodal 2D, 2.5D & 3D Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2006 International Conference on Image Processing</td><td>edu</td><td>Universidad Rey Juan Carlos, Spain</td><td>Spain</td><td>40.33586610</td><td>-3.87694320</td><td>57%</td><td>14</td><td>8</td><td>6</td><td>0</td><td>2</td><td>12</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html" target="_blank">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>0</td><td>17</td><td>26</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html" target="_blank">Names and faces in the news</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html" target="_blank">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>22</td><td>11</td><td>11</td><td>3</td><td>11</td><td>10</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html" target="_blank">Object Detection Combining Recognition and Segmentation</a></td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>105</td><td>64</td><td>41</td><td>9</td><td>58</td><td>43</td></tr><tr><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/12ad3b5bbbf407f8e54ea692c07633d1a867c566.html" target="_blank">Object recognition using segmentation for feature detection</a></td><td><span class="gray">[pdf]</span></td><td>Proceedings of the 17th International Conference on Pattern Recognition, 2004. ICPR 2004.</td><td>edu</td><td>Inst. of Comput. Sci., Univ. of Leoben, Austria</td><td>Austria</td><td>47.38473720</td><td>15.09302010</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html" target="_blank">On a Large Sequence-Based Human Gait Database</a></td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>150</td><td>95</td><td>55</td><td>17</td><td>103</td><td>51</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html" target="_blank">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>78</td><td>40</td><td>38</td><td>8</td><td>44</td><td>31</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf"
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html" target="_blank">Overview of the face recognition grand challenge</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>NIST</td><td>United States</td><td>39.14004000</td><td>-77.21850600</td><td>57%</td><td>999</td><td>566</td><td>432</td><td>86</td><td>549</td><td>442</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html" target="_blank">PETS 2017: Dataset and Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>1</td><td>8</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html" target="_blank">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><span class="gray">[pdf]</a></td><td>Face and Gesture 2011</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>54%</td><td>189</td><td>102</td><td>87</td><td>22</td><td>108</td><td>78</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html" target="_blank">Parameterisation of a stochastic model for human face identification</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>999</td><td>500</td><td>499</td><td>94</td><td>543</td><td>427</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html" target="_blank">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>138</td><td>86</td><td>52</td><td>6</td><td>76</td><td>63</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision 
Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html" target="_blank">Pedestrian Attribute Recognition At Far Distance</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>88</td><td>65</td><td>23</td><td>1</td><td>50</td><td>36</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html" target="_blank">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>60%</td><td>999</td><td>596</td><td>403</td><td>70</td><td>526</td><td>466</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html" target="_blank">Pedestrian detection: A benchmark</a></td><td><span class="gray">[pdf]</span></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</span></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</span></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</span></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United
States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html" target="_blank">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>386</td><td>263</td><td>123</td><td>23</td><td>204</td><td>180</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html" target="_blank">Person Re-identification in the Wild</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>77</td><td>52</td><td>25</td><td>1</td><td>47</td><td>27</td></tr><tr><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/a0cc5f73a37723a6dd465924143f1cb4976d0169.html" target="_blank">Person Transfer GAN to Bridge Domain Gap for Person Re-identification</a></td><td><span class="gray">[pdf]</span></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>92%</td><td>24</td><td>22</td><td>2</td><td>1</td><td>20</td><td>4</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html" target="_blank">Personalizing Human Video Pose Estimation</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Oxford University</td><td>United 
Kingdom</td><td>51.75208490</td><td>-1.25166460</td><td>64%</td><td>36</td><td>23</td><td>13</td><td>2</td><td>30</td><td>8</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html" target="_blank">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><span class="gray">[pdf]</span></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>716</td><td>409</td><td>307</td><td>60</td><td>492</td><td>222</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html" target="_blank">Presentation and validation of the Radboud Faces Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>487</td><td>234</td><td>253</td><td>39</td><td>342</td><td>144</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html" target="_blank">Pruning training sets for learning of object categories</a></td><td><span class="gray">[pdf]</span></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>63</td><td>44</td><td>19</td><td>4</td><td>42</td><td>20</td></tr><tr><td>3531332efe19be21e7401ba1f04570a142617236</td><td>ufdd</td><td>UFDD</td><td><a href="papers/3531332efe19be21e7401ba1f04570a142617236.html" target="_blank">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>1</td><td>4</td><td>0</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html" target="_blank">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>237</td><td>152</td><td>85</td><td>15</td><td>159</td><td>76</td></tr><tr><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td><td>megaage</td><td>MegaAge</td><td><a href="papers/c72a2ea819df9b0e8cd267eebcc6528b8741e03d.html" target="_blank">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>1</td><td>4</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html" target="_blank">Re-identification of pedestrians with variable occlusion and scale</a></td><td><span class="gray">[pdf]</span></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>Kingston University</td><td>United 
Kingdom</td><td>51.42930860</td><td>-0.26840440</td><td>56%</td><td>9</td><td>5</td><td>4</td><td>1</td><td>5</td><td>4</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html" target="_blank">Re-identify people in wide area camera network</a></td><td><span class="gray">[pdf]</span></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Udine</td><td>Italy</td><td>46.08107230</td><td>13.21194740</td><td>60%</td><td>60</td><td>36</td><td>24</td><td>1</td><td>38</td><td>21</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html" target="_blank">ReSEED: social event dEtection dataset</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>1</td><td>5</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html" target="_blank">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html" target="_blank">Recognition using visual phrases</a></td><td><span class="gray">[pdf]</span></td><td>CVPR 2011</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United States</td><td>40.11116745</td><td>-88.22587665</td><td>57%</td><td>246</td><td>140</td><td>106</td><td>18</td><td>170</td><td>68</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html" target="_blank">Recognize complex events from static images by fusing deep channels</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>44</td><td>30</td><td>14</td><td>1</td><td>29</td><td>15</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html" target="_blank">Recognizing disguised faces</a></td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>18</td><td>10</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html" target="_blank">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>511</td><td>283</td><td>228</td><td>50</td><td>329</td><td>182</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html" target="_blank">Robust Face Landmark Estimation under Occlusion</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>61%</td><td>325</td><td>197</td><td>128</td><td>17</td><td>194</td><td>133</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html" target="_blank">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><span class="gray">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>Canada</td><td>45.50397610</td><td>-73.57496870</td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>13</td><td>7</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html" target="_blank">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>2</td><td>21</td><td>12</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html" target="_blank">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>19</td><td>9</td><td>10</td><td>2</td><td>6</td><td>13</td></tr><tr><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td><td>scface</td><td>SCface</td><td><a href="papers/29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d.html" target="_blank">SCface – surveillance cameras face database</a></td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td>Multimedia Tools and Applications</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>179</td><td>103</td><td>76</td><td>15</td><td>88</td><td>89</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html" target="_blank">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html" target="_blank">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td>edu</td><td>Brown University</td><td>United States</td><td>41.82686820</td><td>-71.40123146</td><td>60%</td><td>264</td><td>158</td><td>106</td><td>27</td><td>206</td><td>56</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html" target="_blank">Scalable Person Re-identification: A Benchmark</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>77%</td><td>460</td><td>355</td><td>105</td><td>9</td><td>263</td><td>185</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</span></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</span></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html" target="_blank">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>52</td><td>34</td><td>18</td><td>1</td><td>46</td><td>6</td></tr><tr><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/570f37ed63142312e6ccdf00ecc376341ec72b9f.html" target="_blank">Social LSTM: Human Trajectory Prediction in Crowded Spaces</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>224</td><td>126</td><td>98</td><td>3</td><td>140</td><td>81</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html" target="_blank">Spoofing faces using makeup: An investigative study</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>France</td><td>43.61581310</td><td>7.06838000</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>0</td><td>1</td><td>5</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html" target="_blank">Sports Videos in the Wild (SVW): A video dataset for 
sports analysis</a></td><td><span class="gray">[pdf]</span></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>86%</td><td>7</td><td>6</td><td>1</td><td>1</td><td>5</td><td>2</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>oxford_town_centre</td><td>TownCentre</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html" target="_blank">Stable multi-target tracking in real-time surveillance video</a></td><td><span class="gray">[pdf]</span></td><td>CVPR 2011</td><td>edu</td><td>University of Oxford</td><td>United Kingdom</td><td>51.75345380</td><td>-1.25400997</td><td>67%</td><td>328</td><td>221</td><td>107</td><td>13</td><td>186</td><td>140</td></tr><tr><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/2306b2a8fba28539306052764a77a0d0f5d1236a.html" target="_blank">Surveillance Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html" target="_blank">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html" target="_blank">Texas 3D Face Recognition Database</a></td><td><span class="gray">[pdf]</span></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>66</td><td>40</td><td>26</td><td>3</td><td>40</td><td>27</td></tr><tr><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/6d96f946aaabc734af7fe3fc4454cf8547fcd5ed.html" target="_blank">The AR face database</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>999</td><td>573</td><td>426</td><td>59</td><td>458</td><td>530</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html" target="_blank">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>429</td><td>254</td><td>175</td><td>38</td><td>198</td><td>234</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html" target="_blank">The CMU Face In Action (FIA) Database</a></td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>54</td><td>29</td><td>25</td><td>5</td><td>40</td><td>16</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>448</td><td>311</td><td>50</td><td>404</td><td>345</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>448</td><td>311</td><td>50</td><td>404</td><td>345</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html" target="_blank">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>99</td><td>59</td><td>40</td><td>1</td><td>73</td><td>21</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>#N/A</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html" target="_blank">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>37</td><td>20</td><td>17</td><td>3</td><td>30</td><td>7</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html" target="_blank">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>2</td><td>18</td><td>3</td></tr><tr><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td><td>imdb_face</td><td>IMDb Face</td><td><a href="papers/9e31e77f9543ab42474ba4e9330676e18c242e72.html" target="_blank">The Devil of Face Recognition is in the Noise</a></td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Nanyang Technological University</td><td>Singapore</td><td>1.34841040</td><td>103.68297965</td><td>50%</td><td>6</td><td>3</td><td>3</td><td>0</td><td>4</td><td>1</td></tr><tr><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td><td>umd_faces</td><td>UMD</td><td><a href="papers/71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6.html" target="_blank">The Do’s and Don’ts for CNN-Based Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision 
Workshops (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>26</td><td>16</td><td>10</td><td>2</td><td>16</td><td>8</td></tr><tr><td>72a155c987816ae81c858fddbd6beab656d86220</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/72a155c987816ae81c858fddbd6beab656d86220.html" target="_blank">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html" target="_blank">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><span class="gray">[pdf]</span></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td>edu</td><td>University of Pittsburgh</td><td>United States</td><td>40.44415295</td><td>-79.96243993</td><td>60%</td><td>999</td><td>604</td><td>395</td><td>58</td><td>470</td><td>518</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html" target="_blank">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>3</td><td>18</td><td>9</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html" target="_blank">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>City University of New York</td><td>United States</td><td>40.87228250</td><td>-73.89489171</td><td>51%</td><td>115</td><td>59</td><td>56</td><td>8</td><td>75</td><td>37</td></tr><tr><td>dc8b25e35a3acb812beb499844734081722319b4</td><td>feret</td><td>FERET</td><td><a href="papers/dc8b25e35a3acb812beb499844734081722319b4.html" target="_blank">The FERET database and evaluation procedure for face-recognition algorithms</a></td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>520</td><td>479</td><td>103</td><td>591</td><td>421</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html" target="_blank">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>16</td><td>8</td><td>8</td><td>1</td><td>10</td><td>6</td></tr><tr><td>8be57cdad86fdf8c8290df4ca3149592f3c46dd3</td><td>m2vts</td><td>m2vts</td><td><a href="papers/8be57cdad86fdf8c8290df4ca3149592f3c46dd3.html" target="_blank">The 
M2VTS Multimodal Face Database (Release 1.00)</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>73</td><td>33</td><td>40</td><td>2</td><td>39</td><td>33</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td><td>mr2</td><td>MR2</td><td><a href="papers/578d4ad74818086bb64f182f72e2c8bd31e3d426.html" target="_blank">The MR2: A multi-racial, mega-resolution database of facial stimuli.</a></td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>7</td><td>0</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html" target="_blank">The MUG facial expression database</a></td><td><span class="gray">[pdf]</span></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>Greece</td><td>40.62984145</td><td>22.95889350</td><td>55%</td><td>82</td><td>45</td><td>37</td><td>4</td><td>34</td><td>47</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html" target="_blank">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>61</td><td>37</td><td>24</td><td>0</td><td>43</td><td>16</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html" target="_blank">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>139</td><td>106</td><td>33</td><td>5</td><td>100</td><td>37</td></tr><tr><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td><td>voc</td><td>VOC</td><td><a href="papers/0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a.html" target="_blank">The Pascal Visual 
Object Classes (VOC) Challenge</a></td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>60%</td><td>999</td><td>598</td><td>400</td><td>29</td><td>557</td><td>422</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html" target="_blank">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>116</td><td>70</td><td>46</td><td>14</td><td>84</td><td>31</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html" target="_blank">The intrinsic memorability of face photographs.</a></td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td>Journal of experimental psychology. General</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>52</td><td>29</td><td>23</td><td>2</td><td>36</td><td>14</td></tr><tr><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/d178cde92ab3dc0dd2ebee5a76a33d556c39448b.html" target="_blank">The jiku mobile video dataset</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td>edu</td><td>National University of Singapore</td><td>Singapore</td><td>1.29620180</td><td>103.77689944</td><td>71%</td><td>24</td><td>17</td><td>7</td><td>0</td><td>6</td><td>19</td></tr><tr><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td><td>put_face</td><td>Put Face</td><td><a href="papers/ae0aee03d946efffdc7af2362a42d3750e7dd48a.html" target="_blank">The PUT face database</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>99</td><td>50</td><td>49</td><td>7</td><td>55</td><td>48</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html" target="_blank">Three-dimensional face recognition: an eigensurface approach</a></td><td><span class="gray">[pdf]</span></td><td>2004 International Conference on Image Processing, 2004. 
ICIP '04.</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>38</td><td>16</td><td>22</td><td>4</td><td>24</td><td>13</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html" target="_blank">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>63%</td><td>84</td><td>53</td><td>31</td><td>4</td><td>51</td><td>33</td></tr><tr><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/64e0690dd176a93de9d4328f6e31fc4afe1e7536.html" target="_blank">Tracking Multiple People Online and in Real Time</a></td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>23</td><td>18</td><td>5</td><td>1</td><td>12</td><td>10</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html" target="_blank">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>0</td><td>18</td><td>16</td></tr><tr><td>4eab317b5ac436a949849ed286baa3de2a541eef</td><td>laofiw</td><td>LAOFIW</td><td><a href="papers/4eab317b5ac436a949849ed286baa3de2a541eef.html" target="_blank">Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</a></td><td><a href="https://arxiv.org/pdf/1809.02169.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>2</td><td>0</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html" target="_blank">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>999</td><td>642</td><td>357</td><td>56</td><td>628</td><td>362</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html" target="_blank">UMB-DB: A database of partially occluded 3D faces</a></td><td><span class="gray">[pdf]</span></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>47</td><td>31</td><td>16</td><td>2</td><td>22</td><td>24</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html" target="_blank">UMDFaces: An annotated face dataset for training deep networks</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td>edu</td><td>University of Maryland</td><td>United 
States</td><td>39.28996850</td><td>-76.62196103</td><td>79%</td><td>42</td><td>33</td><td>9</td><td>2</td><td>30</td><td>11</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html" target="_blank">USED: a large-scale social event detection dataset</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td>edu</td><td>University of Trento</td><td>Italy</td><td>46.06588360</td><td>11.11598940</td><td>71%</td><td>7</td><td>5</td><td>2</td><td>0</td><td>3</td><td>4</td></tr><tr><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td><td>uccs</td><td>UCCS</td><td><a href="papers/d4f1eb008eb80595bcfdac368e23ae9754e1e745.html" target="_blank">Unconstrained Face Detection and Open-Set Face Recognition Challenge</a></td><td><span class="gray">[pdf]</span></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>4</td><td>1</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>ufi</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html" target="_blank">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>12</td><td>6</td><td>6</td><td>0</td><td>4</td><td>6</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html" target="_blank">Understanding Kin Relationships in a Photo</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>94</td><td>59</td><td>35</td><td>1</td><td>33</td><td>61</td></tr><tr><td>5a4df9bef1872865f0b619ac3aacc97f49e4a035</td><td>cuhk_train_station</td><td>CUHK Train Station Dataset</td><td><a href="papers/5a4df9bef1872865f0b619ac3aacc97f49e4a035.html" target="_blank">Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</a></td><td><span class="gray">[pdf]</span></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>58%</td><td>141</td><td>82</td><td>59</td><td>5</td><td>60</td><td>75</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html" target="_blank">Understanding images of groups of people</a></td><td><span class="gray">[pdf]</span></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/15e1af79939dbf90790b03d8aa02477783fb1d0f.html" target="_blank">Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</a></td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html" target="_blank">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><span class="gray">[pdf]</span></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td>edu</td><td>University of Notre Dame</td><td>United States</td><td>41.70456775</td><td>-86.23822026</td><td>63%</td><td>35</td><td>22</td><td>13</td><td>3</td><td>18</td><td>15</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html" target="_blank">VADANA: A dense dataset for facial image analysis</a></td><td><span class="gray">[pdf]</span></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>University of Delaware</td><td>United States</td><td>39.68103280</td><td>-75.75401840</td><td>67%</td><td>15</td><td>10</td><td>5</td><td>0</td><td>5</td><td>10</td></tr><tr><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/70c59dc3470ae867016f6ab0e008ac8ba03774a1.html" target="_blank">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>83</td><td>66</td><td>17</td><td>3</td><td>61</td><td>20</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html" target="_blank">VQA: Visual Question Answering</a></td><td><span class="gray">[pdf]</span></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>erce</td><td>ERCe</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</span></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</span></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html" target="_blank">Violent flows: Real-time detection of violent crowd behavior</a></td><td><span class="gray">[pdf]</span></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition 
Workshops</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>65%</td><td>88</td><td>57</td><td>31</td><td>6</td><td>45</td><td>44</td></tr><tr><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td><td>kitti</td><td>KITTI</td><td><a href="papers/026e3363b7f76b51cc711886597a44d5f1fd1de2.html" target="_blank">Vision meets robotics: The KITTI dataset</a></td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td>I. J. Robotics Res.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>999</td><td>602</td><td>397</td><td>36</td><td>553</td><td>462</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html" target="_blank">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>168</td><td>90</td><td>78</td><td>10</td><td>85</td><td>79</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html" target="_blank">Visual Kinship Recognition of Families in the Wild</a></td><td><span class="gray">[pdf]</span></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>University of Massachusetts Dartmouth</td><td>United States</td><td>41.62772475</td><td>-71.00724501</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>2</td><td>3</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html" target="_blank">WIDER FACE: A Face Detection Benchmark</a></td><td><span class="gray">[pdf]</span></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>65%</td><td>178</td><td>115</td><td>63</td><td>12</td><td>112</td><td>66</td></tr><tr><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/36bccfb2ad847096bc76777e544f305813cd8f5b.html" target="_blank">WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td>WLFDB</td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html" target="_blank">WLFDB : Weakly Labeled Face Databases</a></td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html" target="_blank">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a 
href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>78</td><td>49</td><td>29</td><td>6</td><td>54</td><td>23</td></tr><tr><td>0c91808994a250d7be332400a534a9291ca3b60e</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/0c91808994a250d7be332400a534a9291ca3b60e.html" target="_blank">Weak Hypotheses and Boosting for Generic Object Detection and Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>236</td><td>131</td><td>105</td><td>17</td><td>161</td><td>77</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html" target="_blank">Web-based database for facial expression analysis</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>464</td><td>250</td><td>214</td><td>45</td><td>282</td><td>188</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html" target="_blank">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Kentucky</td><td>United States</td><td>38.03337420</td><td>-84.50177580</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/b62628ac06bbac998a3ab825324a41a11bc3a988.html" target="_blank">XM2VTSDB : The extended M2VTS database</a></td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>864</td><td>527</td><td>337</td><td>39</td><td>493</td><td>404</td></tr><tr><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/010f0f4929e6a6644fb01f0e43820f91d0fad292.html" target="_blank">YFCC100M: the new data in multimedia research</a></td><td><span class="gray">[pdf]</a></td><td>Commun. ACM</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>64%</td><td>274</td><td>175</td><td>99</td><td>23</td><td>172</td><td>100</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html" target="_blank">YawDD: a yawning detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>15</td><td>12</td><td>3</td><td>1</td><td>2</td><td>13</td></tr></table></body></html> \ No newline at end of file