author    jules@lens <julescarbon@gmail.com>  2019-02-13 02:02:11 +0100
committer jules@lens <julescarbon@gmail.com>  2019-02-13 02:02:11 +0100
commit    857a8a5f13fa63e7cbc56bfee0361c8c02860424 (patch)
tree      caf41529595dd05ca0808a9fcf5739613df3438c /scraper
parent    84500c8a1e4e7ef267d71fdf8ad5a52fb33b2cb5 (diff)
parent    dc7d9cbba842472efb33186e97ee55751e4d50ca (diff)
Merge branch 'master' of asdf.us:megapixels_dev
Diffstat (limited to 'scraper')
-rw-r--r--   scraper/.gitignore                                    4
l---------   scraper/reports/datasets                              1
-rw-r--r--   scraper/reports/geocode_papers.html                   1
-rw-r--r--   scraper/reports/paper_title_report.html              10
-rw-r--r--   scraper/reports/paper_title_report_no_location.html  10
-rw-r--r--   scraper/reports/paper_title_report_nonmatching.html  10
-rw-r--r--   scraper/s2-final-report.py                           66
-rw-r--r--   scraper/s2-geocode-server.py                         68
-rw-r--r--   scraper/s2-papers.py                                 10
-rw-r--r--   scraper/util.py                                       7
10 files changed, 128 insertions, 59 deletions
diff --git a/scraper/.gitignore b/scraper/.gitignore
new file mode 100644
index 00000000..868c3dd4
--- /dev/null
+++ b/scraper/.gitignore
@@ -0,0 +1,4 @@
+datasets/s2
+datasets/old
+datasets/scholar_entries.numbers
+datasets/scholar_entries.csv
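
These patterns keep the bulky scraped dataset files under scraper/datasets/ out of version control. One hedged way to confirm a given path is matched is to ask git's own matcher; a minimal Python sketch (the file name below is only an example, not a file from this repository):

    import subprocess

    # Exit status 0 means the path is ignored; -v also prints which
    # .gitignore line matched. The path is illustrative.
    proc = subprocess.run(
        ["git", "check-ignore", "-v", "scraper/datasets/s2/example.json"],
        capture_output=True, text=True)
    print(proc.stdout.strip() or "not ignored")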
diff --git a/scraper/reports/datasets b/scraper/reports/datasets
new file mode 120000
index 00000000..ed9c23bf
--- /dev/null
+++ b/scraper/reports/datasets
@@ -0,0 +1 @@
+../../site/datasets/
\ No newline at end of file
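
Mode 120000 above marks the new file as a symlink: git stores only the target path as the blob content, here without a trailing newline (hence the marker). A sketch of recreating the link by hand, assuming the repository root as the working directory:

    import os

    # scraper/reports/datasets -> ../../site/datasets/
    # The target string is stored verbatim as the blob content.
    link, target = "scraper/reports/datasets", "../../site/datasets/"
    if not os.path.islink(link):
        os.symlink(target, link)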
diff --git a/scraper/reports/geocode_papers.html b/scraper/reports/geocode_papers.html
index 529ee9c7..84ffe356 100644
--- a/scraper/reports/geocode_papers.html
+++ b/scraper/reports/geocode_papers.html
@@ -33,5 +33,6 @@ html,body { margin: 0; padding: 0; width: 100%; height: 100%; }
<div id="container">
</div>
</body>
+<script src="/reports/geocode-app.js"></script>
</html>
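
The added <script> tag references /reports/geocode-app.js by absolute path, so the geocode report is evidently meant to be viewed over HTTP rather than opened from disk; the diffstat's s2-geocode-server.py presumably fills that role, though its hunk is not shown here. A minimal sketch of a static server that would make the path resolve, assuming scraper/ as the web root:

    import functools
    import http.server

    # Serve scraper/ so that /reports/geocode-app.js and the report
    # HTML resolve; a sketch only -- the real server is s2-geocode-server.py.
    Handler = functools.partial(
        http.server.SimpleHTTPRequestHandler, directory="scraper")
    with http.server.HTTPServer(("127.0.0.1", 8000), Handler) as httpd:
        httpd.serve_forever()

Run from the repository root, the report would then be reachable at http://127.0.0.1:8000/reports/geocode_papers.html.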
diff --git a/scraper/reports/paper_title_report.html b/scraper/reports/paper_title_report.html
index 51d5204e..90deaf36 100644
--- a/scraper/reports/paper_title_report.html
+++ b/scraper/reports/paper_title_report.html
@@ -1,9 +1,3 @@
-<!doctype html><html><head><meta charset='utf-8'><title>Paper Title Sanity Check</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Title Sanity Check</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3 D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><a href="http://doi.acm.org/10.1145/2072572.2072590" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td></tr><tr><td>50_people_one_question</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose Estimates Across Space and Time</td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>a_pascal_yahoo</td><td>aPascal</td><td>Describing Objects by their Attributes</td><td>Describing objects by their attributes</td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=describing objects by their attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td><a href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=collecting large, richly annotated facial-expression databases from movies&sort=relevance" target="_blank">[s2]</a></td><td>Australian National University</td><td>b1f4423c227fa37b9680787be38857069247a307</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A New Database for Facial Expression, Valence, and Arousal Computation in the Wild</td><td>Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</td><td><a href="http://dl.acm.org/citation.cfm?id=3232665" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a new database for facial expression, valence, and arousal computation in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose 
estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of California, Irvine</td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>The AR face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td></tr><tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr><tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr><tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td><a href="https://doi.org/10.1007/s11263-013-0672-6" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr><tr><td>berkeley_pose</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute classification</td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr><tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr><tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr><tr><td>bjut_3d</td><td>BJUT-3D</td><td>The BJUT-3D Large-Scale Chinese Face Database</td><td>A novel face recognition method based on 3D face model</td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the bjut-3d large-scale chinese face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td></tr><tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face 
analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr><tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human behavior analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>Brainwash dataset</td><td>Brainwash: A Data System for Feature Engineering</td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=brainwash dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td></tr><tr><td>bu_3dfe</td><td>BU-3DFE</td><td>A 3D Facial Expression Database For Facial Behavior Research</td><td>A 3D facial expression database for facial behavior research</td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d facial expression database for facial behavior research&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td></tr><tr><td>buhmap_db</td><td>BUHMAP-DB</td><td>Facial Feature Tracking and Expression Recognition for Sign Language</td><td>Facial feature tracking and expression recognition for sign language</td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial feature tracking and expression recognition for sign language&sort=relevance" target="_blank">[s2]</a></td><td></td><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td></tr><tr><td>cafe</td><td>CAFE</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: validity and reliability from untrained adults&sort=relevance" target="_blank">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr><tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for 
learning of object categories</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance" target="_blank">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian detection: A benchmark</td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: an evaluation of the state of the art&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>camel</td><td>CAMEL</td><td>CAMEL Dataset for Visual and Thermal Infrared Multiple Object Detection and Tracking</td><td>Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=camel dataset for visual and thermal infrared multiple object detection and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td></tr><tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>casia_webface</td><td>CASIA Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance" target="_blank">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr><tr><td>celeba</td><td>CelebA</td><td>Deep Learning Face Attributes in the Wild</td><td>Deep Learning Face Attributes in the Wild</td><td><a 
href="http://arxiv.org/pdf/1411.7766v2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face attributes in the wild&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td></tr><tr><td>celeba_plus</td><td>CelebFaces+</td><td>Deep Learning Face Representation from Predicting 10,000 Classes</td><td>Learning Deep Representation for Imbalanced Classification</td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face representation from predicting 10,000 classes&sort=relevance" target="_blank">[s2]</a></td><td>Shenzhen Institutes of Advanced Technology</td><td>69a68f9cf874c69e2232f47808016c2736b90c35</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: A Review of Events and Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><a href="https://arxiv.org/pdf/1701.02664.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr><tr><td>chokepoint</td><td>ChokePoint</td><td>Patch-based Probabilistic Image Quality Assessment for Face Selection and Improved Video-based Face Recognition</td><td>Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</td><td><a href="http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=patch-based probabilistic image quality assessment for face selection and improved video-based face recognition&sort=relevance" target="_blank">[s2]</a></td><td>University of Queensland</td><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset for semantic urban scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image segmentation and labeling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr><tr><td>cmdp</td><td>CMDP</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=distance estimation of an unknown person from a portrait&sort=relevance" target="_blank">[s2]</a></td><td>California Institute of Technology</td><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO Captions: Data Collection and Evaluation Server</td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td></tr><tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr><tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr><tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr><tr><td>cohn_kanade</td><td>CK</td><td>Comprehensive Database for Facial Expression Analysis</td><td>Comprehensive Database for Facial Expression Analysis</td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=comprehensive database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td></tr><tr><td>cohn_kanade_plus</td><td>CK+</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the extended cohn-kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>A 3D Morphable Eye Region Model for Gaze Estimation</td><td><a href="https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td></tr><tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance" target="_blank">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr><tr><td>cuhk01</td><td>CUHK01</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr><tr><td>cuhk02</td><td>CUHK02</td><td>Locally Aligned Feature Transforms across Views</td><td>Locally Aligned Feature Transforms across Views</td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance" target="_blank">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr><tr><td>cuhk03</td><td>CUHK03</td><td>DeepReID: Deep Filter Pairing 
Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>czech_news_agency</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td>Jacobs University</td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>data_61</td><td>Data61 Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: 
Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>Fashion Landmark Detection in the Wild</td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>Fashion Landmark Detection in the Wild</td><td>Fashion Landmark Detection in the Wild</td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fashion landmark detection in the wild&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5acda0e8c0937bfed013e6382da127103e41395</td></tr><tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: cross-distance and cross-spectral matching&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr><tr><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td>Depth and Appearance for Mobile Scene Analysis</td><td>Depth and Appearance for Mobile 
Scene Analysis</td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=depth and appearance for mobile scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td></tr><tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td></tr><tr><td>expw</td><td>ExpW</td><td>Learning Social Relation Traits from Face Images</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>face_research_lab</td><td>Face Research Lab London</td><td>Face Research Lab London Set. figshare</td><td>Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face research lab london set. 
figshare&sort=relevance" target="_blank">[s2]</a></td><td>University College London</td><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td></tr><tr><td>face_scrub</td><td>FaceScrub</td><td>A data-driven approach to cleaning large face datasets</td><td>A data-driven approach to cleaning large face datasets</td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a data-driven approach to cleaning large face datasets&sort=relevance" target="_blank">[s2]</a></td><td>University of Illinois, Urbana-Champaign</td><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>Face swapping: automatically replacing faces in photographs</td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large collections of images with faces&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>Face Swapping: Automatically Replacing Faces in Photographs</td><td>Face swapping: automatically replacing faces in photographs</td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face swapping: automatically replacing faces in photographs&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>facebook_100</td><td>Facebook100</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer vision: a case study in unconstrained face recognition on facebook&sort=relevance" target="_blank">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr><tr><td>families_in_the_wild</td><td>FIW</td><td>Visual Kinship Recognition of Families in the Wild</td><td>Visual Kinship Recognition of Families in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=visual kinship recognition of families in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>A Benchmark for Face Detection in Unconstrained Settings</td><td><a 
href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>Generalização cartográfica automatizada para um banco de dados cadastral</td><td><a href="https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td><a href="http://pdfs.semanticscholar.org/dc8b/25e35a3acb812beb499844734081722319b4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr><tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><a href="http://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr><tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep networks for facial expression recognition with crowd-sourced label distribution</td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression 
recognition with crowd-sourced label distribution&sort=relevance" target="_blank">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr><tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance" target="_blank">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance" target="_blank">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>A semi-automatic methodology for facial landmark annotation</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a semi-automatic methodology for facial landmark annotation&sort=relevance" target="_blank">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>2D and 3D face recognition: A survey</td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing Cosegmentation for Shopping Images With Cluttered Background</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td></tr><tr><td>gavab_db</td><td>Gavab</td><td>GavabDB: a 3D face database</td><td>Expression invariant 3D face recognition with a Morphable Model</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gavabdb: a 3d face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>GeoFaceExplorer: Exploring the Geo-Dependence of Facial Attributes</td><td>GeoFaceExplorer: exploring the geo-dependence of facial attributes</td><td><a href="http://doi.acm.org/10.1145/2676440.2676443" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=geofaceexplorer: exploring the geo-dependence of facial attributes&sort=relevance" target="_blank">[s2]</a></td><td>University of Kentucky</td><td>17b46e2dad927836c689d6787ddb3387c6159ece</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Object Recognition Using Segmentation for Feature Detection</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object recognition using segmentation for feature detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D human pose annotations</td><td><a 
href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>helen</td><td>Helen</td><td>Interactive Facial Feature Localization</td><td>Interactive Facial Feature Localization</td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=interactive facial feature localization&sort=relevance" target="_blank">[s2]</a></td><td>University of Illinois, Urbana-Champaign</td><td>95f12d27c3b4914e0668a268360948bce92f7db3</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>High-resolution comprehensive 3-D dynamic database for facial articulation analysis</td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td></tr><tr><td>hipsterwars</td><td>Hipsterwars</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hipster wars: discovering elements of fashion styles&sort=relevance" target="_blank">[s2]</a></td><td>Tohoku University</td><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td></tr><tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? a preliminary study of face-based recognition on a transgender dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Face recognition across gender transformation using SVM Classifier</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition across gender transformation using svm classifier&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear 
Imaging</td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance" target="_blank">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr><tr><td>ijb_c</td><td>IJB-A</td><td>Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr><tr><td>ijb_c</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td></td><td>Imagery Library for Intelligent Detection Systems:
The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems:
the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identification by Video Ranking</td><td>Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</td><td><a href="https://doi.org/10.1109/LSP.2016.2574323" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>Deep expectation of real and apparent age from a single image without facial landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="https://doi.org/10.1007/s11263-016-0940-3" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr><tr><td>imfdb</td><td>IMFDB</td><td>Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations</td><td>Indian Movie Face Database: A benchmark for face recognition under wide variations</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=indian movie face database: a benchmark for face recognition under wide variations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td></tr><tr><td>imm_face</td><td>IMM Face Dataset</td><td>The IMM Face Database - An Annotated Dataset of 240 Face Images</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the imm face database - an annotated dataset of 240 face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-task recurrent neural network for 
immediacy prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr><tr><td>imsitu</td><td>imSitu</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=situation recognition: visual semantic role labeling for image understanding&sort=relevance" target="_blank">[s2]</a></td><td>University of Washington</td><td>51eba481dac6b229a7490f650dff7b17ce05df73</td></tr><tr><td>inria_person</td><td>INRIA Pedestrian</td><td>Histograms of Oriented Gradients for Human Detection</td><td>Histograms of oriented gradients for human detection</td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=histograms of oriented gradients for human detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td></tr><tr><td>jaffe</td><td>JAFFE</td><td>Coding Facial Expressions with Gabor Wavelets</td><td>Coding Facial Expressions with Gabor Wavelets</td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=coding facial expressions with gabor wavelets&sort=relevance" target="_blank">[s2]</a></td><td>Kyushu University</td><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>A Synchronization Ground Truth for the Jiku Mobile Video Dataset</td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td></tr><tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr><tr><td>kdef</td><td>KDEF</td><td>The Karolinska Directed Emotional Faces – KDEF</td><td>Gaze fixation and the neural circuitry of face processing in autism</td><td><a href="http://doi.org/10.1038/nn1421" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the karolinska directed emotional faces – kdef&sort=relevance" target="_blank">[s2]</a></td><td></td><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kinectface</td><td>KinectFaceDB</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinectfacedb: a kinect database for face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>The Role of Machine Vision for Intelligent Vehicles</td><td><a href="http://www.path.berkeley.edu/sites/default/files/my_folder_76/Pub_03.2016_Role.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35ba4ebfd017a56b51e967105af9ae273c9b0178</td></tr><tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr><tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person search&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr><tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models 
for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td>Learning Effective Human Pose Estimation from Inaccurate Annotation</td><td>Learning effective human pose estimation from inaccurate annotation</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning effective human pose estimation from inaccurate annotation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>lfw_a</td><td>LFW-a</td><td>Effective Unconstrained Face Recognition by
Combining Multiple Descriptors and Learned
Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by
combining multiple descriptors and learned
background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>lfw_p</td><td>LFWP</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=localizing parts of faces using a consensus of exemplars&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>140438a77a771a8fb656b39a78ff488066eb6b50</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance" target="_blank">[s2]</a></td><td>Chinese Academy of Sciences</td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A Benchmark</td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr><tr><td>market1203</td><td>Market 1203</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><a href="https://doi.org/10.1007/s11042-012-1352-1" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Robust Semi-automatic Head Pose Labeling for Real-World Face Video Sequences</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><a href="https://doi.org/10.1007/s11042-012-1352-1" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust semi-automatic head pose labeling for real-world face video sequences&sort=relevance" target="_blank">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>megaage</td><td>MegaAge</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=quantifying facial age by posterior of age comparisons&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td></tr><tr><td>megaface</td><td>MegaFace</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td><a 
href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the megaface benchmark: 1 million faces for recognition at scale&sort=relevance" target="_blank">[s2]</a></td><td>University of Washington</td><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td></tr><tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for Million Scale Face Recognition</td><td>Level Playing Field for Million Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1705.00393.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr><tr><td>mifs</td><td>MIFS</td><td>Spoofing Faces Using Makeup: An Investigative Study</td><td>Spoofing faces using makeup: An investigative study</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=spoofing faces using makeup: an investigative study&sort=relevance" target="_blank">[s2]</a></td><td>INRIA Méditerranée</td><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td></tr><tr><td>mit_cbcl</td><td>MIT CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><a href="http://www.bheisele.com/avbpa2003.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance" target="_blank">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr><tr><td>miw</td><td>MIW</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr><tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time dataset: one million videos for event understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td></tr><tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult 
age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to associate: hybridboosted multi-target tracker for crowded scene&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a 
href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpii_gaze</td><td>MPIIGaze</td><td>Appearance-based Gaze Estimation in the Wild</td><td>Appearance-based gaze estimation in the wild</td><td><a href="https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=appearance-based gaze estimation in the wild&sort=relevance" target="_blank">[s2]</a></td><td>Max Planck Institute for Informatics</td><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td></tr><tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of the art analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world person re-identification using a drone&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr><tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark for large-scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td><a href="https://arxiv.org/pdf/1711.08565.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>ec792ad2433b6579f2566c932ee414111e194537</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>muct</td><td>MUCT</td><td>The MUCT Landmarked Face Database</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the muct landmarked face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>mug_faces</td><td>MUG Faces</td><td>The MUG Facial Expression Database</td><td>The MUG facial expression database</td><td><a href="http://ieeexplore.ieee.org/document/5617662/" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mug facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>Aristotle University of Thessaloniki</td><td>f1af714b92372c8e606485a3982eab2f16772ad8</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</td><td><a href="http://dl.acm.org/citation.cfm?id=2337184" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>109df0e8e5969ddf01e073143e83599228a1163f</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nd_2006</td><td>ND-2006</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=using a multi-instance enrollment representation to improve 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Competitive affective gaming: 
winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nudedetection</td><td>Nude Detection</td><td>A Bag-of-Features Approach based on Hue-SIFT Descriptor for Nude Detection</td><td>A bag-of-features approach based on Hue-SIFT descriptor for nude detection</td><td><a href="http://ieeexplore.ieee.org/document/7077625/" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a bag-of-features approach based on hue-sift descriptor for nude detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>penn_fudan</td><td>Penn Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining Recognition and Segmentation</td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far Distance</td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td>Summary of Research on Informant Accuracy in Network Data, 11 and on the Reverse Small World Problem</td><td><a 
href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>pku</td><td>PKU</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>pornodb</td><td>Pornography DB</td><td>Pooling in Image Representation: the Visual Codeword Point of View</td><td>Pooling in image representation: The visual codeword point of view</td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pooling in image representation: the visual codeword point of view&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><a href="https://arxiv.org/pdf/1703.06283.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>prw</td><td>PRW</td><td>Person Re-identification in the Wild</td><td>Person Re-identification in the Wild</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of Technology Sydney</td><td>0b84f07af44f964817675ad961def8a51406dd2e</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>pubfig</td><td>PubFig</td><td>Attribute and Simile Classifiers for Face Verification</td><td>Attribute and simile classifiers for face verification</td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=attribute and simile classifiers for face verification&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td></tr><tr><td>pubfig_83</td><td>pubfig83</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer vision: a case study in unconstrained face recognition on facebook&sort=relevance" target="_blank">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Time-delayed correlation analysis for multi-camera activity understanding</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=time-delayed correlation 
analysis for multi-camera activity understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent Re-identification in a Camera Network</td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d body model for people tracking and re-identification&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>Large Variability Surveillance Camera Face Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td></tr><tr><td>scut_fbp</td><td>SCUT-FBP</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td><a href="https://arxiv.org/pdf/1511.02459.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scut-fbp: a benchmark dataset for facial beauty perception&sort=relevance" target="_blank">[s2]</a></td><td>South China University of Technology</td><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and cascaded multi-scale architecture&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sheffield</td><td>Sheffield Face</td><td>Face Recognition: From Theory to Applications</td><td>Face Description with Local Binary Patterns: Application to Face Recognition</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition: from theory to applications&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>Learning Social Relation Traits from Face Images</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>Learning Social Relation Traits from Face Images</td><td>Learning Social Relation Traits from Face Images</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait Database</td><td>On a large sequence-based human gait database</td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>sports_videos_in_the_wild</td><td>SVW</td><td>Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis</td><td>Sports Videos in the Wild (SVW): A video dataset for sports analysis</td><td><a href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sports videos in the wild (svw): a video dataset for sports analysis&sort=relevance" target="_blank">[s2]</a></td><td>Michigan State University</td><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Learning to Predict Human Behavior in Crowded Scenes</td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database:
+<!doctype html><html><head><meta charset='utf-8'><title>Paper Title Sanity Check</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Title Sanity Check</h2><table border='1' cellpadding='3' cellspacing='3'><tr><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>YFCC100M: the new data in multimedia research</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td></tr><tr><td>fiw_300</td><td>300-W</td><td>A semi-automatic methodology for facial landmark annotation</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a semi-automatic methodology for facial landmark annotation&sort=relevance" target="_blank">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>buhmap_db</td><td>BUHMAP-DB</td><td>Facial Feature Tracking and Expression Recognition for Sign Language</td><td>Facial feature tracking and expression recognition for sign language</td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial feature tracking and expression recognition for sign language&sort=relevance" target="_blank">[s2]</a></td><td></td><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>Vision meets robotics: The KITTI dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td></tr><tr><td>ilids_mcts</td><td>i-LIDS</td><td>Imagery Library for Intelligent Detection Systems: The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><a href="http://doi.acm.org/10.1145/2733373.2806365" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild 
Challenge: The first facial landmark localization Challenge</td><td>300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance" target="_blank">[s2]</a></td><td>University of Twente</td><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td></tr><tr><td>chokepoint</td><td>ChokePoint</td><td>Patch-based Probabilistic Image Quality Assessment for Face Selection and Improved Video-based Face Recognition</td><td>Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</td><td><a href="http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=patch-based probabilistic image quality assessment for face selection and improved video-based face recognition&sort=relevance" target="_blank">[s2]</a></td><td>University of Queensland</td><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td></tr><tr><td>hipsterwars</td><td>Hipsterwars</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hipster wars: discovering elements of fashion styles&sort=relevance" target="_blank">[s2]</a></td><td>Tohoku University</td><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>Gaze locking: passive eye contact detection for human-object interaction</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>06f02199690961ba52997cde1527e714d2b3bf8f</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><a 
href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td>Jacobs University</td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>mit_cbcl</td><td>MIT CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><a href="http://www.bheisele.com/avbpa2003.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance" target="_blank">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr><tr><td>uccs</td><td>UCCS</td><td>Large scale unconstrained open set face database</td><td>Large scale unconstrained open set face database</td><td><a href="http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large scale unconstrained open set face database&sort=relevance" target="_blank">[s2]</a></td><td>University of Colorado at Colorado Springs</td><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent Re-identification in a Camera Network</td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>kinectface</td><td>KinectFaceDB</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinectfacedb: a kinect database for face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td></tr><tr><td>prw</td><td>PRW</td><td>Person Re-identification in the Wild</td><td>Person Re-identification in the Wild</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=person re-identification in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of Technology Sydney</td><td>0b84f07af44f964817675ad961def8a51406dd2e</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td><a href="http://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c91808994a250d7be332400a534a9291ca3b60e</td></tr><tr><td>ijb_c</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr><tr><td>face_scrub</td><td>FaceScrub</td><td>A data-driven approach to cleaning large face datasets</td><td>A data-driven approach to cleaning large face datasets</td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a data-driven approach to cleaning large 
face datasets&sort=relevance" target="_blank">[s2]</a></td><td>University of Illinois, Urbana-Champaign</td><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>mpii_gaze</td><td>MPIIGaze</td><td>Appearance-based Gaze Estimation in the Wild</td><td>Appearance-based gaze estimation in the wild</td><td><a href="https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=appearance-based gaze estimation in the wild&sort=relevance" target="_blank">[s2]</a></td><td>Max Planck Institute for Informatics</td><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of California, Irvine</td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes (VOC) Challenge</td><td><a href="https://doi.org/10.1007/s11263-009-0275-4" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>Deep expectation of real and apparent age from a single image without facial landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="https://doi.org/10.1007/s11263-016-0940-3" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr><tr><td>inria_person</td><td>INRIA Pedestrian</td><td>Histograms of Oriented Gradients for Human Detection</td><td>Histograms of oriented gradients for human detection</td><td><a 
href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=histograms of oriented gradients for human detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Object Recognition Using Segmentation for Feature Detection</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object recognition using segmentation for feature detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>lfw_a</td><td>LFW-a</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by combining multiple descriptors and learned background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</td><td><a href="https://doi.org/10.1109/BTAS.2013.6712710" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? 
a preliminary study of face-based recognition on a transgender dataset&sort=relevance" target="_blank">[s2]</a></td><td>University of North Carolina Wilmington</td><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td></tr><tr><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td>Depth and Appearance for Mobile Scene Analysis</td><td>Depth and Appearance for Mobile Scene Analysis</td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=depth and appearance for mobile scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td></tr><tr><td>lfw_p</td><td>LFWP</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=localizing parts of faces using a consensus of exemplars&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>140438a77a771a8fb656b39a78ff488066eb6b50</td></tr><tr><td>ijb_c</td><td>IJB-A</td><td>Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>celeba_plus</td><td>CelebFaces+</td><td>Deep Learning Face Representation from Predicting 10,000 Classes</td><td>Deep Learning Face Representation from Predicting 10,000 Classes</td><td><a 
href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Sun_Deep_Learning_Face_2014_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face representation from predicting 10,000 classes&sort=relevance" target="_blank">[s2]</a></td><td>Shenzhen Institutes of Advanced Technology</td><td>177bc509dd0c7b8d388bb47403f28d6228c14b5c</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>GeoFaceExplorer: Exploring the Geo-Dependence of Facial Attributes</td><td>GeoFaceExplorer: exploring the geo-dependence of facial attributes</td><td><a href="http://doi.acm.org/10.1145/2676440.2676443" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=geofaceexplorer: exploring the geo-dependence of facial attributes&sort=relevance" target="_blank">[s2]</a></td><td>University of Kentucky</td><td>17b46e2dad927836c689d6787ddb3387c6159ece</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td><a href="https://pdfs.semanticscholar.org/03c1/fc9c3339813ed81ad0de540132f9f695a0f8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance" target="_blank">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>sports_videos_in_the_wild</td><td>SVW</td><td>Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis</td><td>Sports Videos in the Wild (SVW): A video dataset for sports analysis</td><td><a href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sports videos in the wild (svw): a video dataset for sports analysis&sort=relevance" target="_blank">[s2]</a></td><td>Michigan State University</td><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td></tr><tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=personalizing human video pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian detection: A benchmark</td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td></tr><tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-task recurrent neural network for immediacy prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr><tr><td>cafe</td><td>CAFE</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a 
href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: validity and reliability from untrained adults&sort=relevance" target="_blank">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr><tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td><a href="https://doi.org/10.1007/s11263-013-0672-6" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person search&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td><a href="http://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2258e01865367018ed6f4262c880df85b94959f8</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance 
Networks</td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing cosegmentation for recognizing people</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2008.4587481" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td></tr><tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>mifs</td><td>MIFS</td><td>Spoofing Faces Using Makeup: An Investigative Study</td><td>Spoofing faces using makeup: An investigative study</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=spoofing faces using makeup: an investigative study&sort=relevance" target="_blank">[s2]</a></td><td>INRIA Méditerranée</td><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td></tr><tr><td>cohn_kanade</td><td>CK</td><td>Comprehensive Database for Facial Expression Analysis</td><td>Comprehensive Database for Facial Expression Analysis</td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=comprehensive database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td></tr><tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td><a href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td></tr><tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D human pose annotations</td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr><tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for Million Scale Face Recognition</td><td>Level Playing Field for Million Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1705.00393.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr><tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a 
href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark for large-scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep networks for facial expression recognition with crowd-sourced label distribution</td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression recognition with crowd-sourced label distribution&sort=relevance" target="_blank">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>SCface – surveillance cameras face database</td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>Learning Social Relation Traits from Face Images</td><td>Learning Social Relation Traits from Face Images</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far Distance</td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr><tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>Acquiring linear subspaces for face recognition under variable lighting</td><td><a 
href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>Multimodal 2D, 2.5D & 3D Face Verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td></tr><tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image segmentation and labeling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>a_pascal_yahoo</td><td>aPascal</td><td>Describing Objects by their Attributes</td><td>Describing objects by their attributes</td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing objects by their attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><a href="http://doi.acm.org/10.1145/2072572.2072590" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Genealogical face recognition based on UB KinFace database</td><td><a href="https://doi.org/10.1109/CVPRW.2011.5981801" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY 
Buffalo</td><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Time-delayed correlation analysis for multi-camera activity understanding</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=time-delayed correlation analysis for multi-camera activity understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Generic object recognition with boosting</td><td><a href="http://www.emt.tu-graz.ac.at/~pinz/onlinepapers/Reprint_Vol_28_No_3_2006.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td><a href="https://doi.org/10.1109/TIFS.2014.2361479" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance" target="_blank">[s2]</a></td><td>University of North Carolina at Wilmington</td><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><a href="http://arxiv.org/abs/1611.01484" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face dataset for training deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><a href="http://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset for semantic urban scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of the art analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr><tr><td>penn_fudan</td><td>Penn Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining 
Recognition and Segmentation</td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>wider</td><td>WIDER</td><td>Recognize Complex Events from Static Images by Fusing Deep Channels</td><td>Recognize complex events from static images by fusing deep channels</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognize complex events from static images by fusing deep channels&sort=relevance" target="_blank">[s2]</a></td><td>Shenzhen Institutes of Advanced Technology</td><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td></tr><tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>ufdd</td><td>UFDD</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the limits of unconstrained face detection: a challenge dataset and baseline results&sort=relevance" target="_blank">[s2]</a></td><td>Johns Hopkins University</td><td>377f2b65e6a9300448bdccf678cde59449ecd337</td></tr><tr><td>vmu</td><td>VMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>cuhk02</td><td>CUHK02</td><td>Locally Aligned Feature Transforms across Views</td><td>Locally Aligned Feature Transforms across Views</td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance" target="_blank">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.434" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Multi-camera activity correlation analysis</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0163.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. 
Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr><tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: cross-distance and cross-spectral matching&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A Benchmark</td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr><tr><td>tud_multiview</td><td>TUD-Multiview</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>cuhk01</td><td>CUHK01</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric 
learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr><tr><td>wider_attribute</td><td>WIDER Attribute</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human attribute recognition by deep hierarchical contexts&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>44d23df380af207f5ac5b41459c722c87283e1eb</td></tr><tr><td>vadana</td><td>VADANA</td><td>VADANA: A dense dataset for facial image analysis</td><td>VADANA: A dense dataset for facial image analysis</td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vadana: a dense dataset for facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td></tr><tr><td>jaffe</td><td>JAFFE</td><td>Coding Facial Expressions with Gabor Wavelets</td><td>Coding Facial Expressions with Gabor Wavelets</td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=coding facial expressions with gabor wavelets&sort=relevance" target="_blank">[s2]</a></td><td>Kyushu University</td><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance" target="_blank">[s2]</a></td><td>Chinese Academy of Sciences</td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Local Descriptors Encoded by Fisher Vectors for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/dd1d/51c3a59cb71cbfe1433ebeb4d973f7f9ddc1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td></tr><tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Kinship Verification through Transfer Learning</td><td><a href="http://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr><tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose 
estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>czech_news_agency</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td><a href="http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large collections of images with faces&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3 D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Texas 3D Face Recognition Database</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ssiai_may10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td></tr><tr><td>cohn_kanade_plus</td><td>CK+</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the extended cohn-kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td>Learning Effective Human Pose Estimation from Inaccurate Annotation</td><td>Learning effective human pose estimation from inaccurate annotation</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning effective human pose estimation from inaccurate annotation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait Database</td><td>On a large sequence-based human gait database</td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>Fashion Landmark Detection in the Wild</td><td>Fashion Landmark Detection in the Wild</td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fashion landmark detection in the wild&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>violent_flows</td><td>Violent Flows</td><td>Violent Flows: Real-Time Detection of Violent Crowd Behavior</td><td>Violent flows: Real-time detection of violent crowd behavior</td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=violent flows: real-time detection of violent crowd behavior&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>5194cbd51f9769ab25260446b4fa17204752e799</td></tr><tr><td>imsitu</td><td>imSitu</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=situation recognition: visual semantic role labeling for image understanding&sort=relevance" target="_blank">[s2]</a></td><td>University of Washington</td><td>51eba481dac6b229a7490f650dff7b17ce05df73</td></tr><tr><td>wider_face</td><td>WIDER FACE</td><td>WIDER FACE: A Face Detection Benchmark</td><td>WIDER FACE: A Face Detection Benchmark</td><td><a href="https://arxiv.org/pdf/1511.06523.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wider face: a face detection benchmark&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td></tr><tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human behavior analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>youtube_faces</td><td>YouTubeFaces</td><td>Face Recognition in Unconstrained Videos with Matched Background Similarity</td><td>Face recognition in unconstrained videos with matched background similarity</td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=face recognition in unconstrained videos with matched background similarity&sort=relevance" target="_blank">[s2]</a></td><td>Open University of Israel</td><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td></tr><tr><td>data_61</td><td>Data61 Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr><tr><td>cmdp</td><td>CMDP</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=distance estimation of an unknown person from a portrait&sort=relevance" target="_blank">[s2]</a></td><td>California Institute of Technology</td><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td></tr><tr><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td>PAINFUL DATA: The UNBC-McMaster Shoulder Pain Expression Archive Database</td><td>Painful data: The UNBC-McMaster shoulder pain expression archive database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=painful data: the unbc-mcmaster shoulder pain expression archive database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Social LSTM: Human Trajectory Prediction in Crowded Spaces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>50_people_one_question</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose Estimates Across Space and Time</td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>mot</td><td>MOT</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to associate: hybridboosted multi-target tracker for crowded scene&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2013.4" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td></tr><tr><td>wlfdb</td><td>WLFDB</td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB: Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO: Common Objects in Context</td><td><a href="http://pdfs.semanticscholar.org/71b7/178df5d2b112d07e45038cb5637208659ff7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset</td><td>The Cityscapes Dataset</td><td><a href="http://pdfs.semanticscholar.org/5ffd/74d2873b7cba2cbc5fd295cc7fbdedca22a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ffd74d2873b7cba2cbc5fd295cc7fbdedca22a2</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models 
for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for learning of object categories</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance" target="_blank">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr><tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>celeba</td><td>CelebA</td><td>Deep Learning Face Attributes in the Wild</td><td>Deep Learning Face Attributes in the Wild</td><td><a href="http://arxiv.org/pdf/1411.7766v2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face attributes in the wild&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td></tr><tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance" target="_blank">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>The SUN Attribute 
Database: Beyond Categories for Deeper Scene Understanding</td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>Face Swapping: Automatically Replacing Faces in Photographs</td><td>Face swapping: automatically replacing faces in photographs</td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face swapping: automatically replacing faces in photographs&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_motionpairs</td><td>TUD-Motionpairs</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>cuhk03</td><td>CUHK03</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>The AR face database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>ward</td><td>WARD</td><td>Re-identify people in wide area camera network</td><td>Re-identify people in wide area camera network</td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identify people in wide area camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><a href="https://arxiv.org/pdf/1705.07426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a database for facial expression, valence, and arousal computing in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td></tr><tr><td>pubfig</td><td>PubFig</td><td>Attribute and Simile Classifiers for Face Verification</td><td>Attribute and simile classifiers for face verification</td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=attribute and simile classifiers for face verification&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>A Benchmark for Face Detection in Unconstrained Settings</td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>urban_tribes</td><td>Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td><a href="http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from bikers to surfers: visual recognition of urban tribes&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>774cbb45968607a027ae4729077734db000a1ec5</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>berkeley_pose</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute classification</td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr><tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr><tr><td>nudedetection</td><td>Nude Detection</td><td>A Bag-of-Features Approach based on Hue-SIFT Descriptor for Nude Detection</td><td>A bag-of-features approach based on Hue-SIFT descriptor for nude detection</td><td><a href="http://ieeexplore.ieee.org/document/7077625/" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a bag-of-features approach based on hue-sift descriptor for nude detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>vgg_celebs_in_places</td><td>CIP</td><td>Faces in Places: Compound Query Retrieval</td><td>Faces in Places: compound 
query retrieval</td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=faces in places: compound query retrieval&sort=relevance" target="_blank">[s2]</a></td><td>University of Oxford</td><td>7ebb153704706e457ab57b432793d2b6e5d12592</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database:
Discovering, Annotating, and Recognizing Scene Attributes</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sun attribute database:
-discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>towncenter</td><td>TownCenter</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time surveillance video&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_motionpairs</td><td>TUD-Motionparis</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_multiview</td><td>TUD-Multiview</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>uccs</td><td>UCCS</td><td>Large scale unconstrained open set face database</td><td>Large scale unconstrained open set face database</td><td><a href="http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large scale unconstrained open set face database&sort=relevance" target="_blank">[s2]</a></td><td>University of Colorado at Colorado Springs</td><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td></tr><tr><td>ucf_101</td><td>UCF101</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ucf101: a dataset of 101 human actions classes from videos in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of Central Florida</td><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><a href="http://doi.acm.org/10.1145/2733373.2806365" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>ufdd</td><td>UFDD</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the limits of unconstrained face detection: a challenge dataset and baseline results&sort=relevance" target="_blank">[s2]</a></td><td>Johns Hopkins University</td><td>377f2b65e6a9300448bdccf678cde59449ecd337</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><a href="http://arxiv.org/abs/1611.01484" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face 
dataset for training deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><a href="https://arxiv.org/pdf/1705.07426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td>PAINFUL DATA: The UNBC-McMaster Shoulder Pain Expression Archive Database</td><td>Painful data: The UNBC-McMaster shoulder pain expression archive database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=painful data: the unbc-mcmaster shoulder pain expression archive database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td></tr><tr><td>urban_tribes</td><td>Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td><a href="http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from bikers to surfers: visual recognition of urban tribes&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>774cbb45968607a027ae4729077734db000a1ec5</td></tr><tr><td>used</td><td>USED Social Event Dataset</td><td>USED: A Large-scale Social Event Detection Dataset</td><td>USED: a large-scale social event detection dataset</td><td><a href="http://doi.acm.org/10.1145/2910017.2910624" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=used: a large-scale social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8627f019882b024aef92e4eb9355c499c733e5b7</td></tr><tr><td>v47</td><td>V47</td><td>Re-identification of Pedestrians with Variable Occlusion and Scale</td><td>Re-identification of pedestrians with variable occlusion and scale</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identification of pedestrians with variable occlusion and scale&sort=relevance" target="_blank">[s2]</a></td><td></td><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td></tr><tr><td>vadana</td><td>VADANA</td><td>VADANA: A dense dataset for facial image analysis</td><td>VADANA: A dense dataset for facial image analysis</td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vadana: a dense dataset for facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td></tr><tr><td>vgg_celebs_in_places</td><td>CIP</td><td>Faces in Places: Compound Query Retrieval</td><td>Faces in Places: compound query retrieval</td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=faces in places: compound query retrieval&sort=relevance" target="_blank">[s2]</a></td><td>University of 
Oxford</td><td>7ebb153704706e457ab57b432793d2b6e5d12592</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>vgg_faces2</td><td>VGG Face2</td><td>VGGFace2: A dataset for recognising faces across pose and age</td><td>VGGFace2: A Dataset for Recognising Faces across Pose and Age</td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vggface2: a dataset for recognising faces across pose and age&sort=relevance" target="_blank">[s2]</a></td><td>University of Oxford</td><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td></tr><tr><td>violent_flows</td><td>Violent Flows</td><td>Violent Flows: Real-Time Detection of Violent Crowd Behavior</td><td>Violent flows: Real-time detection of violent crowd behavior</td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=violent flows: real-time detection of violent crowd behavior&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5194cbd51f9769ab25260446b4fa17204752e799</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>visual_phrases</td><td>Phrasal Recognition</td><td>Recognition using Visual Phrases</td><td>Recognition using visual phrases</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition using visual phrases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e8de844fefd54541b71c9823416daa238be65546</td></tr><tr><td>vmu</td><td>VMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes Challenge: A Retrospective</td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>ward</td><td>WARD</td><td>Re-identify people in wide area camera network</td><td>Re-identify people in wide area camera network</td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identify people in wide area camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wider</td><td>WIDER</td><td>Recognize Complex Events from Static Images by Fusing Deep Channels</td><td>Recognize complex events from static images by fusing deep channels</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognize complex events from static images by fusing deep channels&sort=relevance" target="_blank">[s2]</a></td><td>Shenzhen Institutes of Advanced Technology</td><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td></tr><tr><td>wider_attribute</td><td>WIDER Attribute</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human attribute recognition by deep hierarchical contexts&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>44d23df380af207f5ac5b41459c722c87283e1eb</td></tr><tr><td>wider_face</td><td>WIDER FACE</td><td>WIDER FACE: A Face Detection Benchmark</td><td>WIDER FACE: A Face Detection Benchmark</td><td><a href="https://arxiv.org/pdf/1511.06523.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wider face: a face detection benchmark&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>wlfdb</td><td></td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB: Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>The New Data and New Challenges in Multimedia Research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a6e695ddd07aad719001c0fc1129328452385949</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance" target="_blank">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>youtube_faces</td><td>YouTubeFaces</td><td>Face Recognition in Unconstrained Videos with Matched Background Similarity</td><td>Face recognition in unconstrained videos with matched background similarity</td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition in unconstrained videos with matched 
background similarity&sort=relevance" target="_blank">[s2]</a></td><td>Open University of Israel</td><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=personalizing human video pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr></table></body></html> \ No newline at end of file
+discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr><tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr><tr><td>casia_webface</td><td>CASIA Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance" target="_blank">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr><tr><td>used</td><td>USED Social Event Dataset</td><td>USED: A Large-scale Social Event Detection Dataset</td><td>USED: a large-scale social event detection dataset</td><td><a href="http://doi.acm.org/10.1145/2910017.2910624" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=used: a large-scale social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8627f019882b024aef92e4eb9355c499c733e5b7</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td></tr><tr><td>10k_US_adult_faces</td><td>10K US Adult 
Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>A new ranking method for principal components analysis and its application to face image analysis</td><td><a href="https://doi.org/10.1016/j.imavis.2009.11.005" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td></tr><tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: A Review of Events and Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><a href="https://arxiv.org/pdf/1701.02664.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>The HDA+ Data Set for Research on Fully Automated Re-identification Systems</td><td><a href="http://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td></tr><tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>v47</td><td>V47</td><td>Re-identification of Pedestrians with Variable Occlusion and Scale</td><td>Re-identification of pedestrians with variable occlusion and scale</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identification of pedestrians with variable occlusion and scale&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td></tr><tr><td>towncenter</td><td>TownCenter</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time surveillance video&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>helen</td><td>Helen</td><td>Interactive Facial Feature Localization</td><td>Interactive Facial Feature Localization</td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=interactive facial feature localization&sort=relevance" target="_blank">[s2]</a></td><td>University of Illinois, Urbana-Champaign</td><td>95f12d27c3b4914e0668a268360948bce92f7db3</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td></tr><tr><td>megaface</td><td>MegaFace</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the megaface benchmark: 1 million faces for recognition at scale&sort=relevance" target="_blank">[s2]</a></td><td>University of Washington</td><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identi cation by Video Ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identi cation by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9a9877791945c6fa4c1743ec6d3fb32570ef8481</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes 
There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>facebook_100</td><td>Facebook100</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer vision: a case study in unconstrained face recognition on facebook&sort=relevance" target="_blank">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>pubfig_83</td><td>pubfig83</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer vision: a case study in unconstrained face recognition on facebook&sort=relevance" target="_blank">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><a href="https://arxiv.org/pdf/1703.06283.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time 
Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time dataset: one million videos for event understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>market1203</td><td>Market 1203</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world person re-identification using a drone&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>The put face 
database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=collecting large, richly annotated facial-expression databases from movies&sort=relevance" target="_blank">[s2]</a></td><td>Australian National University</td><td>b1f4423c227fa37b9680787be38857069247a307</td></tr><tr><td>ucf_101</td><td>UCF101</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ucf101: a dataset of 101 human actions classes from videos in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of Central Florida</td><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Xm2vtsdb: the Extended M2vts Database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database with age, pose and expression</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>pornodb</td><td>Pornography DB</td><td>Pooling in Image Representation: the Visual Codeword Point of View</td><td>Pooling in image representation: The visual codeword point of view</td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pooling in image representation: the visual codeword point of view&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td></tr><tr><td>scut_fbp</td><td>SCUT-FBP</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td><a href="https://arxiv.org/pdf/1511.02459.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scut-fbp: a benchmark 
dataset for facial beauty perception&sort=relevance" target="_blank">[s2]</a></td><td>South China University of Technology</td><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Crowdsourcing facial expressions for affective-interaction</td><td><a href="https://doi.org/10.1016/j.cviu.2016.02.001" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5a3bc3e5e9753769163cb30b16dbd12e266b93e</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Robust Semi-automatic Head Pose Labeling for Real-World Face Video Sequences</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><a href="https://doi.org/10.1007/s11042-012-1352-1" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust semi-automatic head pose labeling for real-world face video sequences&sort=relevance" target="_blank">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td></tr><tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial 
Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr><tr><td>imfdb</td><td>IMFDB</td><td>Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations</td><td>Indian Movie Face Database: A benchmark for face recognition under wide variations</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=indian movie face database: a benchmark for face recognition under wide variations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td></tr><tr><td>bu_3dfe</td><td>BU-3DFE</td><td>A 3D Facial Expression Database For Facial Behavior Research</td><td>A 3D facial expression database for facial behavior research</td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d facial expression database for facial behavior research&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td></tr><tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>The jiku mobile video dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>megaage</td><td>MegaAge</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=quantifying facial age by posterior of age comparisons&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation procedure for face-recognition 
algorithms</td><td><a href="http://pdfs.semanticscholar.org/dc8b/25e35a3acb812beb499844734081722319b4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr><tr><td>families_in_the_wild</td><td>FIW</td><td>Visual Kinship Recognition of Families in the Wild</td><td>Visual Kinship Recognition of Families in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=visual kinship recognition of families in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and cascaded multi-scale architecture&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d body model for people tracking and re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>300 Faces In-The-Wild Challenge: database and results</td><td><a href="http://doi.org/10.1016/j.imavis.2016.01.002" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e4754afaa15b1b53e70743880484b8d0736990ff</td></tr><tr><td>gfw</td><td>YouTube Pose</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=merge or not? 
learning to group faces via imitation learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td></tr><tr><td>visual_phrases</td><td>Phrasal Recognition</td><td>Recognition using Visual Phrases</td><td>Recognition using visual phrases</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition using visual phrases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e8de844fefd54541b71c9823416daa238be65546</td></tr><tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>vgg_faces2</td><td>VGG Face2</td><td>VGGFace2: A dataset for recognising faces across pose and age</td><td>VGGFace2: A Dataset for Recognising Faces across Pose and Age</td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vggface2: a dataset for recognising faces across pose and age&sort=relevance" target="_blank">[s2]</a></td><td>University of Oxford</td><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td><a href="https://arxiv.org/pdf/1711.08565.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ec792ad2433b6579f2566c932ee414111e194537</td></tr><tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td></tr><tr><td>mug_faces</td><td>MUG Faces</td><td>The MUG Facial 
Expression Database</td><td>The MUG facial expression database</td><td><a href="http://ieeexplore.ieee.org/document/5617662/" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mug facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>Aristotle University of Thessaloniki</td><td>f1af714b92372c8e606485a3982eab2f16772ad8</td></tr><tr><td>pku</td><td>PKU</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: an evaluation of the state of the art&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear Imaging</td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance" target="_blank">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr><tr><td>miw</td><td>MIW</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>nd_2006</td><td>ND-2006</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=using a multi-instance enrollment representation to improve 3d face recognition&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/reports/paper_title_report_no_location.html b/scraper/reports/paper_title_report_no_location.html
index 33de04e4..66e93e87 100644
--- a/scraper/reports/paper_title_report_no_location.html
+++ b/scraper/reports/paper_title_report_no_location.html
@@ -1,9 +1,3 @@
-<!doctype html><html><head><meta charset='utf-8'><title>Papers with no location</title><link rel='stylesheet' href='reports.css'></head><body><h2>Papers with no location</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3 D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><a href="http://doi.acm.org/10.1145/2072572.2072590" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td></tr><tr><td>50_people_one_question</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose Estimates Across Space and Time</td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>a_pascal_yahoo</td><td>aPascal</td><td>Describing Objects by their Attributes</td><td>Describing objects by their attributes</td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=describing objects by their attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td><a href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A New Database for Facial Expression, Valence, and Arousal Computation in the Wild</td><td>Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</td><td><a href="http://dl.acm.org/citation.cfm?id=3232665" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a new database for facial expression, valence, and arousal computation in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, 
Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>The AR face database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td></tr><tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr><tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr><tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td><a href="https://doi.org/10.1007/s11263-013-0672-6" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr><tr><td>berkeley_pose</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute classification</td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr><tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr><tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr><tr><td>bjut_3d</td><td>BJUT-3D</td><td>The BJUT-3D Large-Scale Chinese Face Database</td><td>A novel face recognition method based on 3D face model</td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the bjut-3d large-scale chinese face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td></tr><tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr><tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human behavior analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>Brainwash dataset</td><td>Brainwash: A Data System for Feature Engineering</td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=brainwash dataset&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td></tr><tr><td>buhmap_db</td><td>BUHMAP-DB</td><td>Facial Feature Tracking and Expression Recognition for Sign Language</td><td>Facial feature tracking and expression recognition for sign language</td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial feature tracking and expression recognition for sign language&sort=relevance" target="_blank">[s2]</a></td><td></td><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td></tr><tr><td>cafe</td><td>CAFE</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: validity and reliability from untrained adults&sort=relevance" target="_blank">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr><tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for learning of object categories</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance" target="_blank">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian detection: A benchmark</td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: an evaluation of the state of the art&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>camel</td><td>CAMEL</td><td>CAMEL Dataset for Visual and Thermal Infrared Multiple Object Detection and Tracking</td><td>Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=camel dataset for visual and thermal infrared multiple object detection and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td></tr><tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face 
Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>casia_webface</td><td>CASIA Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance" target="_blank">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: A Review of Events and Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><a href="https://arxiv.org/pdf/1701.02664.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset for semantic urban scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image 
segmentation and labeling&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO Captions: Data Collection and Evaluation Server</td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td></tr><tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr><tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr><tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr><tr><td>cohn_kanade_plus</td><td>CK+</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the extended cohn-kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td></tr><tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><a 
href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance" target="_blank">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr><tr><td>cuhk01</td><td>CUHK01</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr><tr><td>cuhk02</td><td>CUHK02</td><td>Locally Aligned Feature Transforms across Views</td><td>Locally Aligned Feature Transforms across Views</td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance" target="_blank">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr><tr><td>cuhk03</td><td>CUHK03</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>czech_news_agency</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>data_61</td><td>Data61 Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5acda0e8c0937bfed013e6382da127103e41395</td></tr><tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: cross-distance and cross-spectral matching&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr><tr><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td>Depth and Appearance for Mobile Scene Analysis</td><td>Depth and Appearance for Mobile Scene Analysis</td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=depth and appearance for mobile scene analysis&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td></tr><tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td></tr><tr><td>expw</td><td>ExpW</td><td>Learning Social Relation Traits from Face Images</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr><tr><td>families_in_the_wild</td><td>FIW</td><td>Visual Kinship Recognition of Families in the Wild</td><td>Visual Kinship Recognition of Families in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=visual kinship recognition of families in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>Generalização cartográfica automatizada para um banco de dados cadastral</td><td><a href="https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td><a href="http://pdfs.semanticscholar.org/dc8b/25e35a3acb812beb499844734081722319b4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr><tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><a href="http://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr><tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep networks for facial expression recognition with crowd-sourced label distribution</td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression recognition with crowd-sourced label distribution&sort=relevance" target="_blank">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr><tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>2D and 3D face recognition: A survey</td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing Cosegmentation for Shopping Images With Cluttered Background</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td></tr><tr><td>gavab_db</td><td>Gavab</td><td>GavabDB: a 3D face database</td><td>Expression invariant 3D face recognition with a Morphable Model</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gavabdb: a 3d face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Object Recognition Using Segmentation for Feature Detection</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object recognition using segmentation for feature detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D 
human pose annotations</td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>High-resolution comprehensive 3-D dynamic database for facial articulation analysis</td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td></tr><tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? 
a preliminary study of face-based recognition on a transgender dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Face recognition across gender transformation using SVM Classifier</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition across gender transformation using svm classifier&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear Imaging</td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance" target="_blank">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr><tr><td>ijb_c</td><td>IJB-A</td><td>Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and 
recognition: IARPA Janus Benchmark A</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr><tr><td>ijb_c</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td></td><td>Imagery Library for Intelligent Detection Systems:
-The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems:
-the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identi cation by Video Ranking</td><td>Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</td><td><a href="https://doi.org/10.1109/LSP.2016.2574323" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identi cation by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>Deep expectation of real and apparent age from a single image without facial landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="https://doi.org/10.1007/s11263-016-0940-3" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr><tr><td>imfdb</td><td>IMFDB</td><td>Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations</td><td>Indian Movie Face Database: A benchmark for face recognition under wide variations</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian movie face database: a benchmark for face recognition under wide variations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td></tr><tr><td>imm_face</td><td>IMM Face Dataset</td><td>The IMM Face Database - An Annotated Dataset of 240 Face Images</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the imm face database - an annotated dataset of 240 face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-task recurrent neural network for 
immediacy prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr><tr><td>inria_person</td><td>INRIA Pedestrian</td><td>Histograms of Oriented Gradients for Human Detection</td><td>Histograms of oriented gradients for human detection</td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=histograms of oriented gradients for human detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>A Synchronization Ground Truth for the Jiku Mobile Video Dataset</td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td></tr><tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr><tr><td>kdef</td><td>KDEF</td><td>The Karolinska Directed Emotional Faces – KDEF</td><td>Gaze fixation and the neural circuitry of face processing in autism</td><td><a href="http://doi.org/10.1038/nn1421" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the karolinska directed emotional faces – kdef&sort=relevance" target="_blank">[s2]</a></td><td></td><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kinectface</td><td>KinectFaceDB</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinectfacedb: a kinect database for face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>The Role of Machine Vision for Intelligent Vehicles</td><td><a href="http://www.path.berkeley.edu/sites/default/files/my_folder_76/Pub_03.2016_Role.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35ba4ebfd017a56b51e967105af9ae273c9b0178</td></tr><tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr><tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person search&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr><tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td>Learning Effective Human Pose Estimation from Inaccurate Annotation</td><td>Learning effective human pose estimation from inaccurate annotation</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning effective human pose estimation from inaccurate annotation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a 
survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw_a</td><td>LFW-a</td><td>Effective Unconstrained Face Recognition by
- Combining Multiple Descriptors and Learned
- Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by
- combining multiple descriptors and learned
- background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A Benchmark</td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr><tr><td>market1203</td><td>Market 1203</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for 
person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr><tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for Million Scale Face Recognition</td><td>Level Playing Field for Million Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1705.00393.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr><tr><td>mit_cbcl</td><td>MIT CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><a href="http://www.bheisele.com/avbpa2003.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance" target="_blank">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr><tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr><tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time dataset: one million videos for event understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td></tr><tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of 
normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to associate: hybridboosted multi-target tracker for crowded scene&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of 
the art analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world person re-identification using a drone&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr><tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark for large-scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td><a href="https://arxiv.org/pdf/1711.08565.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ec792ad2433b6579f2566c932ee414111e194537</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>muct</td><td>MUCT</td><td>The MUCT Landmarked Face Database</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=the muct landmarked face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</td><td><a href="http://dl.acm.org/citation.cfm?id=2337184" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>109df0e8e5969ddf01e073143e83599228a1163f</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nd_2006</td><td>ND-2006</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=using a multi-instance enrollment representation to improve 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nudedetection</td><td>Nude Detection</td><td>A Bag-of-Features Approach based on Hue-SIFT Descriptor for Nude Detection</td><td>A bag-of-features approach based on Hue-SIFT descriptor for nude detection</td><td><a href="http://ieeexplore.ieee.org/document/7077625/" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a bag-of-features approach based on hue-sift descriptor for nude detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>penn_fudan</td><td>Penn Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining Recognition and Segmentation</td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far Distance</td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classi cation</td><td>Summary of Research on Informant Accuracy in Network Data, 11 and on the Reverse Small World Problem</td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classi cation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>pku</td><td>PKU</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for 
person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>pornodb</td><td>Pornography DB</td><td>Pooling in Image Representation: the Visual Codeword Point of View</td><td>Pooling in image representation: The visual codeword point of view</td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pooling in image representation: the visual codeword point of view&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><a href="https://arxiv.org/pdf/1703.06283.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Time-delayed correlation analysis for multi-camera activity understanding</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=time-delayed correlation analysis for multi-camera activity understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent Re-identification in a Camera Network</td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d 
body model for people tracking and re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>Large Variability Surveillance Camera Face Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and cascaded multi-scale architecture&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sheffield</td><td>Sheffield Face</td><td>Face Recognition: From Theory to Applications</td><td>Face Description with Local Binary Patterns: Application to Face Recognition</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition: from theory to applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait Database</td><td>On a large sequence-based human gait database</td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Learning to Predict Human Behavior in Crowded Scenes</td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database:
+<!doctype html><html><head><meta charset='utf-8'><title>Papers with no location</title><link rel='stylesheet' href='reports.css'></head><body><h2>Papers with no location</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>YFCC100M: the new data in multimedia research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td></tr><tr><td>buhmap_db</td><td>BUHMAP-DB</td><td>Facial Feature Tracking and Expression Recognition for Sign Language</td><td>Facial feature tracking and expression recognition for sign language</td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial feature tracking and expression recognition for sign language&sort=relevance" target="_blank">[s2]</a></td><td></td><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>Vision meets robotics: The KITTI dataset</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td></tr><tr><td>ilids_mcts</td><td>i-LIDS</td><td>Imagery Library for Intelligent Detection Systems: The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><a href="http://doi.acm.org/10.1145/2733373.2806365" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a New Detection 
Algorithm</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>Gaze locking: passive eye contact detection for human-object interaction</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>06f02199690961ba52997cde1527e714d2b3bf8f</td></tr><tr><td>mit_cbcl</td><td>MIT CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><a href="http://www.bheisele.com/avbpa2003.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance" target="_blank">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent Re-identification in a Camera Network</td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>kinectface</td><td>KinectFaceDB</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinectfacedb: a kinect database for face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a 
href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td><a href="http://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0c91808994a250d7be332400a534a9291ca3b60e</td></tr><tr><td>ijb_c</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes (VOC) Challenge</td><td><a href="https://doi.org/10.1007/s11263-009-0275-4" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>Deep expectation of real and apparent age from a single image without facial landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="https://doi.org/10.1007/s11263-016-0940-3" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr><tr><td>inria_person</td><td>INRIA Pedestrian</td><td>Histograms of Oriented Gradients for Human Detection</td><td>Histograms of oriented gradients for human detection</td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=histograms of oriented gradients for human detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Object Recognition Using Segmentation for Feature Detection</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object recognition using segmentation for feature detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>lfw_a</td><td>LFW-a</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by combining multiple descriptors and learned background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td>Depth and Appearance for Mobile Scene Analysis</td><td>Depth and Appearance for Mobile Scene Analysis</td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=depth and appearance for mobile scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td></tr><tr><td>ijb_c</td><td>IJB-A</td><td>Pushing the Frontiers of 
Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance" target="_blank">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</td><td><a href="https://pdfs.semanticscholar.org/03c1/fc9c3339813ed81ad0de540132f9f695a0f8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance" target="_blank">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=personalizing human video pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian detection: A benchmark</td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td></tr><tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=multi-task recurrent neural network for immediacy prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr><tr><td>cafe</td><td>CAFE</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: validity and reliability from untrained adults&sort=relevance" target="_blank">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr><tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td><a href="https://doi.org/10.1007/s11263-013-0672-6" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person search&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td><a href="http://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2258e01865367018ed6f4262c880df85b94959f8</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing cosegmentation for recognizing people</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2008.4587481" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td></tr><tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td><a href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and 
arousal estimation in-the-wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td></tr><tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D human pose annotations</td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr><tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for Million Scale Face Recognition</td><td>Level Playing Field for Million Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1705.00393.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr><tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark for large-scale face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep networks for facial expression recognition with crowd-sourced label distribution</td><td><a 
href="http://arxiv.org/pdf/1608.01041v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression recognition with crowd-sourced label distribution&sort=relevance" target="_blank">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>SCface – surveillance cameras face database</td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far Distance</td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr><tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>Acquiring linear subspaces for face recognition under variable lighting</td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>Multimodal 2D, 2.5D & 3D Face Verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td></tr><tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image segmentation and labeling&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>a_pascal_yahoo</td><td>aPascal</td><td>Describing Objects by their Attributes</td><td>Describing objects by their attributes</td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing objects by their attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><a href="http://doi.acm.org/10.1145/2072572.2072590" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Time-delayed correlation analysis for multi-camera activity understanding</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=time-delayed correlation analysis for multi-camera activity understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Generic object recognition with boosting</td><td><a href="http://www.emt.tu-graz.ac.at/~pinz/onlinepapers/Reprint_Vol_28_No_3_2006.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><a href="http://arxiv.org/abs/1611.01484" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face dataset for training deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene 
Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><a href="http://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset for semantic urban scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of the art analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr><tr><td>penn_fudan</td><td>Penn Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining Recognition and Segmentation</td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>vmu</td><td>VMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition 
systems?</td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>cuhk02</td><td>CUHK02</td><td>Locally Aligned Feature Transforms across Views</td><td>Locally Aligned Feature Transforms across Views</td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance" target="_blank">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.434" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Multi-camera activity correlation analysis</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0163.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Nefian and Monson H.
Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr><tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: cross-distance and cross-spectral matching&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A Benchmark</td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr><tr><td>tud_multiview</td><td>TUD-Multiview</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>cuhk01</td><td>CUHK01</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric 
learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr><tr><td>vadana</td><td>VADANA</td><td>VADANA: A dense dataset for facial image analysis</td><td>VADANA: A dense dataset for facial image analysis</td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vadana: a dense dataset for facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Local Descriptors Encoded by Fisher Vectors for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/dd1d/51c3a59cb71cbfe1433ebeb4d973f7f9ddc1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td></tr><tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Kinship Verification through Transfer Learning</td><td><a href="http://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in 
Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr><tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>czech_news_agency</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3 D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Texas 3D Face Recognition Database</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ssiai_may10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td></tr><tr><td>cohn_kanade_plus</td><td>CK+</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the extended cohn-kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td>Learning Effective Human Pose Estimation from Inaccurate Annotation</td><td>Learning effective human pose estimation from inaccurate annotation</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning effective human pose estimation from inaccurate annotation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait 
Database</td><td>On a large sequence-based human gait database</td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>violent_flows</td><td>Violent Flows</td><td>Violent Flows: Real-Time Detection of Violent Crowd Behavior</td><td>Violent flows: Real-time detection of violent crowd behavior</td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=violent flows: real-time detection of violent crowd behavior&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5194cbd51f9769ab25260446b4fa17204752e799</td></tr><tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human behavior analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>data_61</td><td>Data61 Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr><tr><td>unbc_shoulder_pain</td><td>UNBC-McMaster 
Pain</td><td>PAINFUL DATA: The UNBC-McMaster Shoulder Pain Expression Archive Database</td><td>Painful data: The UNBC-McMaster shoulder pain expression archive database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=painful data: the unbc-mcmaster shoulder pain expression archive database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Social LSTM: Human Trajectory Prediction in Crowded Spaces</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>50_people_one_question</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose Estimates Across Space and Time</td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>mot</td><td>MOT</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to associate: hybridboosted multi-target tracker for crowded scene&sort=relevance"
target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/T-AFFC.2013.4" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td></tr><tr><td>wlfdb</td><td>WLFDB</td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB: Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO: Common Objects in Context</td><td><a href="http://pdfs.semanticscholar.org/71b7/178df5d2b112d07e45038cb5637208659ff7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset</td><td>The Cityscapes Dataset</td><td><a href="http://pdfs.semanticscholar.org/5ffd/74d2873b7cba2cbc5fd295cc7fbdedca22a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ffd74d2873b7cba2cbc5fd295cc7fbdedca22a2</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for learning of object categories</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance" target="_blank">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr><tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic 
Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance" target="_blank">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_motionpairs</td><td>TUD-Motionpairs</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>cuhk03</td><td>CUHK03</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>The AR face database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>ward</td><td>WARD</td><td>Re-identify people in wide area camera network</td><td>Re-identify people in wide area camera network</td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identify people in wide area camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><a href="https://arxiv.org/pdf/1705.07426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A Database for Facial 
Expression, Valence, and Arousal Computing in the Wild</td><td>AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a database for facial expression, valence, and arousal computing in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>berkeley_pose</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute classification</td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr><tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr><tr><td>nudedetection</td><td>Nude Detection</td><td>A Bag-of-Features Approach based on Hue-SIFT Descriptor for Nude Detection</td><td>A bag-of-features approach based on Hue-SIFT descriptor for nude detection</td><td><a href="http://ieeexplore.ieee.org/document/7077625/" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a bag-of-features approach based on hue-sift descriptor for nude detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database:
Discovering, Annotating, and Recognizing Scene Attributes</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sun attribute database:
-discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>towncenter</td><td>TownCenter</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time surveillance video&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_motionpairs</td><td>TUD-Motionparis</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_multiview</td><td>TUD-Multiview</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><a href="http://doi.acm.org/10.1145/2733373.2806365" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance" target="_blank">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><a href="http://arxiv.org/abs/1611.01484" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face dataset for training deep networks&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><a href="https://arxiv.org/pdf/1705.07426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td>PAINFUL DATA: The UNBC-McMaster Shoulder Pain Expression Archive Database</td><td>Painful data: The UNBC-McMaster shoulder pain expression archive database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=painful data: the unbc-mcmaster shoulder pain expression archive database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td></tr><tr><td>used</td><td>USED Social Event Dataset</td><td>USED: A Large-scale Social Event Detection Dataset</td><td>USED: a large-scale social event detection dataset</td><td><a href="http://doi.acm.org/10.1145/2910017.2910624" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=used: a large-scale social event detection dataset&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>8627f019882b024aef92e4eb9355c499c733e5b7</td></tr><tr><td>v47</td><td>V47</td><td>Re-identification of Pedestrians with Variable Occlusion and Scale</td><td>Re-identification of pedestrians with variable occlusion and scale</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identification of pedestrians with variable occlusion and scale&sort=relevance" target="_blank">[s2]</a></td><td></td><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td></tr><tr><td>vadana</td><td>VADANA</td><td>VADANA: A dense dataset for facial image analysis</td><td>VADANA: A dense dataset for facial image analysis</td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vadana: a dense dataset for facial image analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>violent_flows</td><td>Violent Flows</td><td>Violent Flows: Real-Time Detection of Violent Crowd Behavior</td><td>Violent flows: Real-time detection of violent crowd behavior</td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=violent flows: real-time detection of violent crowd behavior&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5194cbd51f9769ab25260446b4fa17204752e799</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>visual_phrases</td><td>Phrasal Recognition</td><td>Recognition using Visual Phrases</td><td>Recognition using visual phrases</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition using visual phrases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e8de844fefd54541b71c9823416daa238be65546</td></tr><tr><td>vmu</td><td>VMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes Challenge: A Retrospective</td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance" target="_blank">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>ward</td><td>WARD</td><td>Re-identify people in wide area camera network</td><td>Re-identify people in wide area camera network</td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identify people in wide area camera network&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>wlfdb</td><td></td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB: Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: 
Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>The New Data and New Challenges in Multimedia Research</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a6e695ddd07aad719001c0fc1129328452385949</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance" target="_blank">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=personalizing human video pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr></table></body></html> \ No newline at end of file
+discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr><tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr><tr><td>casia_webface</td><td>CASIA Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance" target="_blank">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr><tr><td>used</td><td>USED Social Event Dataset</td><td>USED: A Large-scale Social Event Detection Dataset</td><td>USED: a large-scale social event detection dataset</td><td><a href="http://doi.acm.org/10.1145/2910017.2910624" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=used: a large-scale social event detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8627f019882b024aef92e4eb9355c499c733e5b7</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>A new ranking method for principal components analysis and its application to face image analysis</td><td><a href="https://doi.org/10.1016/j.imavis.2009.11.005" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td></tr><tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: A Review of Events and 
Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><a href="https://arxiv.org/pdf/1701.02664.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>The HDA+ Data Set for Research on Fully Automated Re-identification Systems</td><td><a href="http://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td></tr><tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>v47</td><td>V47</td><td>Re-identification of Pedestrians with Variable Occlusion and Scale</td><td>Re-identification of pedestrians with variable occlusion and scale</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identification of pedestrians with variable occlusion and scale&sort=relevance" target="_blank">[s2]</a></td><td></td><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td></tr><tr><td>towncenter</td><td>TownCenter</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time surveillance video&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identi cation by Video Ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identi cation by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9a9877791945c6fa4c1743ec6d3fb32570ef8481</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><a href="https://arxiv.org/pdf/1703.06283.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time dataset: one million videos for event understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>market1203</td><td>Market 1203</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning detection dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world 
person re-identification using a drone&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>The put face database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Xm2vtsdb: the Extended M2vts Database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database with age, pose and expression</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td></tr><tr><td>pornodb</td><td>Pornography DB</td><td>Pooling in Image Representation: the Visual Codeword Point of View</td><td>Pooling in image representation: The visual codeword point of view</td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pooling in image representation: the visual codeword point of view&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Crowdsourcing facial expressions for affective-interaction</td><td><a href="https://doi.org/10.1016/j.cviu.2016.02.001" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World 
Videos</td><td>Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5a3bc3e5e9753769163cb30b16dbd12e266b93e</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td></tr><tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr><tr><td>imfdb</td><td>IMFDB</td><td>Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations</td><td>Indian Movie Face Database: A benchmark for face recognition under wide variations</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=indian movie face database: a benchmark for face recognition under wide variations&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td></tr><tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>The jiku mobile video dataset</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation 
procedure for face-recognition algorithms</td><td><a href="http://pdfs.semanticscholar.org/dc8b/25e35a3acb812beb499844734081722319b4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr><tr><td>families_in_the_wild</td><td>FIW</td><td>Visual Kinship Recognition of Families in the Wild</td><td>Visual Kinship Recognition of Families in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=visual kinship recognition of families in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and cascaded multi-scale architecture&sort=relevance" target="_blank">[s2]</a></td><td></td><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d body model for people tracking and re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>300 Faces In-The-Wild Challenge: database and results</td><td><a href="http://doi.org/10.1016/j.imavis.2016.01.002" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e4754afaa15b1b53e70743880484b8d0736990ff</td></tr><tr><td>gfw</td><td>GFW</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td>Merge or Not? Learning to Group Faces via Imitation Learning</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=merge or not? 
learning to group faces via imitation learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td></tr><tr><td>visual_phrases</td><td>Phrasal Recognition</td><td>Recognition using Visual Phrases</td><td>Recognition using visual phrases</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition using visual phrases&sort=relevance" target="_blank">[s2]</a></td><td></td><td>e8de844fefd54541b71c9823416daa238be65546</td></tr><tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td><a href="https://arxiv.org/pdf/1711.08565.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ec792ad2433b6579f2566c932ee414111e194537</td></tr><tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td></tr><tr><td>pku</td><td>PKU</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: an evaluation of the state of the art&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear Imaging</td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance" target="_blank">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr><tr><td>nd_2006</td><td>ND-2006</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=using a multi-instance enrollment representation to improve 3d face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/reports/paper_title_report_nonmatching.html b/scraper/reports/paper_title_report_nonmatching.html
index a59cf813..d24cec59 100644
--- a/scraper/reports/paper_title_report_nonmatching.html
+++ b/scraper/reports/paper_title_report_nonmatching.html
@@ -1,9 +1,3 @@
-<!doctype html><html><head><meta charset='utf-8'><title>Paper Titles that do not match</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Titles that do not match</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3 D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A New Database for Facial Expression, Valence, and Arousal Computation in the Wild</td><td>Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</td><td><a href="http://dl.acm.org/citation.cfm?id=3232665" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a new database for facial expression, valence, and arousal computation in the wild&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of California, Irvine</td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>bjut_3d</td><td>BJUT-3D</td><td>The BJUT-3D Large-Scale Chinese Face Database</td><td>A novel face recognition method based on 3D face model</td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the bjut-3d large-scale chinese face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>Brainwash dataset</td><td>Brainwash: A Data System for Feature Engineering</td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=brainwash dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td></tr><tr><td>camel</td><td>CAMEL</td><td>CAMEL Dataset for Visual and Thermal Infrared Multiple Object Detection and Tracking</td><td>Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=camel dataset for visual and thermal infrared multiple object detection and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>celeba_plus</td><td>CelebFaces+</td><td>Deep Learning Face Representation from Predicting 10,000 Classes</td><td>Learning Deep Representation for Imbalanced Classification</td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face representation from predicting 10,000 classes&sort=relevance" target="_blank">[s2]</a></td><td>Shenzhen Institutes of Advanced Technology</td><td>69a68f9cf874c69e2232f47808016c2736b90c35</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO Captions: Data Collection and Evaluation Server</td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance" target="_blank">[s2]</a></td><td></td><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>A 3D Morphable Eye Region Model for Gaze Estimation</td><td><a href="https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance" target="_blank">[s2]</a></td><td>Carnegie Mellon University</td><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial 
Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td>Jacobs University</td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>Fashion Landmark Detection in the Wild</td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5acda0e8c0937bfed013e6382da127103e41395</td></tr><tr><td>expw</td><td>ExpW</td><td>Learning Social Relation Traits from Face Images</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>face_research_lab</td><td>Face Research Lab London</td><td>Face Research Lab London Set. figshare</td><td>Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face research lab london set. 
figshare&sort=relevance" target="_blank">[s2]</a></td><td>University College London</td><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>Face swapping: automatically replacing faces in photographs</td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large collections of images with faces&sort=relevance" target="_blank">[s2]</a></td><td>Columbia University</td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>A Benchmark for Face Detection in Unconstrained Settings</td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>Generalização cartográfica automatizada para um banco de dados cadastral</td><td><a href="https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance" target="_blank">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance" target="_blank">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>2D and 3D face recognition: A survey</td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing Cosegmentation for Shopping Images With Cluttered Background</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td></tr><tr><td>gavab_db</td><td>Gavab</td><td>GavabDB: a 3D face database</td><td>Expression invariant 3D face recognition with a Morphable Model</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gavabdb: a 3d face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on 
High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>High-resolution comprehensive 3-D dynamic database for facial articulation analysis</td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? a preliminary study of face-based recognition on a transgender dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Face recognition across gender transformation using SVM Classifier</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition across gender transformation using svm classifier&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database and Evaluation with a New Detection 
Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance" target="_blank">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td></td><td>Imagery Library for Intelligent Detection Systems:
-The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems:
-the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identi cation by Video Ranking</td><td>Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</td><td><a href="https://doi.org/10.1109/LSP.2016.2574323" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identi cation by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>imm_face</td><td>IMM Face Dataset</td><td>The IMM Face Database - An Annotated Dataset of 240 Face Images</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the imm face database - an annotated dataset of 240 face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>A Synchronization Ground Truth for the Jiku Mobile Video Dataset</td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td></tr><tr><td>kdef</td><td>KDEF</td><td>The Karolinska Directed Emotional Faces – KDEF</td><td>Gaze fixation and the neural circuitry of face processing in autism</td><td><a href="http://doi.org/10.1038/nn1421" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the karolinska directed emotional faces – kdef&sort=relevance" target="_blank">[s2]</a></td><td></td><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI 
Dataset</td><td>The Role of Machine Vision for Intelligent Vehicles</td><td><a href="http://www.path.berkeley.edu/sites/default/files/my_folder_76/Pub_03.2016_Role.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35ba4ebfd017a56b51e967105af9ae273c9b0178</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>lfw_a</td><td>LFW-a</td><td>Effective Unconstrained Face Recognition by
- Combining Multiple Descriptors and Learned
- Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by
- combining multiple descriptors and learned
- background statistics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance" target="_blank">[s2]</a></td><td>Chinese Academy of Sciences</td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><a href="https://doi.org/10.1007/s11042-012-1352-1" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>muct</td><td>MUCT</td><td>The MUCT Landmarked Face Database</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the muct landmarked face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</td><td><a href="http://dl.acm.org/citation.cfm?id=2337184" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>109df0e8e5969ddf01e073143e83599228a1163f</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classi cation</td><td>Summary of Research on Informant Accuracy in Network Data, 11 and on the Reverse Small World Problem</td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classi cation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>Large Variability Surveillance Camera Face Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" 
target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sheffield</td><td>Sheffield Face</td><td>Face Recognition: From Theory to Applications</td><td>Face Description with Local Binary Patterns: Application to Face Recognition</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition: from theory to applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>Learning Social Relation Traits from Face Images</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance" target="_blank">[s2]</a></td><td>Chinese University of Hong Kong</td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Learning to Predict Human Behavior in Crowded Scenes</td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database:
+<!doctype html><html><head><meta charset='utf-8'><title>Paper Titles that do not match</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Titles that do not match</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>ilids_mcts</td><td>i-LIDS</td><td>Imagery Library for Intelligent Detection Systems: The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>Gaze locking: passive eye contact detection for human-object interaction</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>06f02199690961ba52997cde1527e714d2b3bf8f</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td>Jacobs University</td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of California, Irvine</td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of 
People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. 
Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance" target="_blank">[s2]</a></td><td>Chinese Academy of Sciences</td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3 D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming 
data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Social LSTM: Human Trajectory Prediction in Crowded Spaces</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a 
href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><a href="https://arxiv.org/pdf/1705.07426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>A 
Benchmark for Face Detection in Unconstrained Settings</td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database:
Discovering, Annotating, and Recognizing Scene Attributes</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sun attribute database:
-discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><a href="https://arxiv.org/pdf/1705.07426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes Challenge: A Retrospective</td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? 
approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>The New Data and New Challenges in Multimedia Research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a6e695ddd07aad719001c0fc1129328452385949</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance" target="_blank">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr></table></body></html> \ No newline at end of file
+discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>A new ranking method for principal components analysis and its application to face image analysis</td><td><a href="https://doi.org/10.1016/j.imavis.2009.11.005" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identi cation by Video Ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identi cation by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? 
approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr></table></body></html> \ No newline at end of file
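Most of the "nonmatching" title pairs in the report above differ only in case, punctuation, or whitespace; a few show ligature loss from PDF extraction ("Person Re-Identi cation" vs. "Person Re-identification"). A rough normalizer makes the first group visible. This is an illustrative sketch, not the comparison the scraper actually performs:

import re

def normalize_title(title):
    # Lowercase, replace punctuation runs with single spaces, and trim,
    # so only substantive wording differences remain.
    return re.sub(r"[^a-z0-9]+", " ", title.lower()).strip()

assert normalize_title("Context-aware {CNNs} for person head detection") == \
       normalize_title("Context-Aware CNNs for Person Head Detection")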
diff --git a/scraper/s2-final-report.py b/scraper/s2-final-report.py
index 489e43df..58ac481f 100644
--- a/scraper/s2-final-report.py
+++ b/scraper/s2-final-report.py
@@ -9,6 +9,7 @@ import subprocess
from util import *
DIR_PUBLIC_CITATIONS = "../site/datasets/final"
+DIR_UNKNOWN_CITATIONS = "../site/datasets/unknown"
@click.command()
def s2_final_report():
@@ -23,15 +24,16 @@ def s2_final_report():
def process_paper(row, addresses):
    aggregate_citations = {}
+    unknown_citations = {}
    address = None
    papers = []
    print(row['paper_ids'])
    for paper_id in row['paper_ids']:
-        res = process_single_paper(row, paper_id, addresses, aggregate_citations)
+        res = process_single_paper(row, paper_id, addresses, aggregate_citations, unknown_citations)
        if res:
            papers.append(res)
-        if res['address']:
-            address = res['address']
+            if res['address']:
+                address = res['address']
    if not len(papers):
        return
    with open('{}/{}.json'.format(DIR_PUBLIC_CITATIONS, row['key']), 'w') as f:
@@ -42,8 +44,14 @@ def process_paper(row, addresses):
            'additional_papers': papers[1:],
            'citations': [aggregate_citations[key] for key in aggregate_citations.keys()],
        }, f)
+    with open('{}/{}.json'.format(DIR_UNKNOWN_CITATIONS, row['key']), 'w') as f:
+        json.dump({
+            'id': papers[0]['paper_id'],
+            'paper': papers[0],
+            'citations': [unknown_citations[key] for key in unknown_citations.keys()],
+        }, f)
-def process_single_paper(row, paper_id, addresses, aggregate_citations):
+def process_single_paper(row, paper_id, addresses, aggregate_citations, unknown_citations):
    res = {
        'paper_id': '',
        'key': '',
@@ -60,13 +68,6 @@ def process_single_paper(row, paper_id, addresses, aggregate_citations):
        # 'citations_doi': 0,
    }
-    geocoded_citations = []
-    unknown_citations = []
-    empty_citations = []
-    pdf_count = 0
-    doi_count = 0
-    address_count = 0
-
    fn = file_path('papers', paper_id, 'paper.json')
    with open(fn, 'r') as f:
@@ -103,14 +104,16 @@ def process_single_paper(row, paper_id, addresses, aggregate_citations):
        citationId = cite['paperId']
        if citationId in aggregate_citations:
            continue
+        elif citationId in unknown_citations:
+            continue
        seen_here = {}
        citation = load_paper(citationId)
        has_pdf = os.path.exists(file_path('pdf', citationId, 'paper.txt'))
        has_doi = os.path.exists(file_path('doi', citationId, 'paper.doi'))
-        if has_pdf:
-            pdf_count += 1
-        if has_doi:
-            doi_count += 1
+        # if has_pdf:
+        #     pdf_count += 1
+        # if has_doi:
+        #     doi_count += 1
        if citation is None or citation.data is None:
            print("Citation missing! {}".format(cite['paperId']))
            continue
@@ -120,7 +123,7 @@ def process_single_paper(row, paper_id, addresses, aggregate_citations):
        institution = ''
        address = None
        for inst in sorted(institutions, key=operator.itemgetter(1)):
-            address_count += 1
+            # address_count += 1
            institution = inst[1]
            next_address = addresses.findObject(institution)
            if next_address and next_address['address'] not in seen_here:
@@ -142,21 +145,20 @@ def process_single_paper(row, paper_id, addresses, aggregate_citations):
                address = next_address
                geocoded_addresses.append(next_address)
        if address:
-            if citationId not in aggregate_citations:
-                aggregate_citations[citationId] = {
-                    'id': citationId,
-                    'title': citation.title,
-                    'addresses': geocoded_addresses,
-                    'year': citation.year,
-                    'pdf': citation.pdf_link,
-                }
-
-        # res['citation_count'] = len(data['citations'])
-        # res['citations_geocoded'] = len(geocoded_citations)
-        # res['citations_unknown'] = len(unknown_citations)
-        # res['citations_empty'] = len(empty_citations)
-        # res['citations_pdf'] = pdf_count
-        # res['citations_doi'] = doi_count
+            aggregate_citations[citationId] = {
+                'id': citationId,
+                'title': citation.title,
+                'addresses': geocoded_addresses,
+                'year': citation.year,
+                'pdf': citation.pdf_link,
+            }
+        else:
+            unknown_citations[citationId] = {
+                'id': citationId,
+                'title': citation.title,
+                'year': citation.year,
+                'pdf': citation.pdf_link,
+            }
    return res
def load_ft_lookup():
@@ -179,6 +181,8 @@ def load_megapixels_lookup():
        rec = {}
        for index, key in enumerate(keys):
            rec[key] = row[index]
+        if rec['paper_id'] == "":
+            continue
        paper_key = rec['key']
        if paper_key not in lookup:
            rec['paper_ids'] = []
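With this change, process_paper() writes a second JSON file per dataset key: geocoded citations go to ../site/datasets/final and citations without a resolvable address to ../site/datasets/unknown. A minimal sketch of consuming both files, assuming only the JSON shapes written above (the summary script itself is hypothetical, and 'lfw' is just an example key from the reports):

import json
import os

DIR_FINAL = "../site/datasets/final"
DIR_UNKNOWN = "../site/datasets/unknown"

def citation_counts(key):
    # Return (geocoded, unknown) citation counts for one dataset key.
    with open(os.path.join(DIR_FINAL, '{}.json'.format(key))) as f:
        final = json.load(f)
    with open(os.path.join(DIR_UNKNOWN, '{}.json'.format(key))) as f:
        unknown = json.load(f)
    return len(final['citations']), len(unknown['citations'])

geocoded, unknown = citation_counts('lfw')
print('lfw: {} geocoded, {} unknown citations'.format(geocoded, unknown))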
diff --git a/scraper/s2-geocode-server.py b/scraper/s2-geocode-server.py
new file mode 100644
index 00000000..0b1b0937
--- /dev/null
+++ b/scraper/s2-geocode-server.py
@@ -0,0 +1,68 @@
+#!python
+
+import os
+import sys
+import json
+import time
+import argparse
+from datetime import datetime
+from flask import Flask, request, render_template, jsonify
+
+from dotenv import load_dotenv
+load_dotenv()
+
+from util import *
+
+locations_worksheet = fetch_worksheet('paper_locations')
+
+app = Flask(__name__, static_url_path="/reports", static_folder=os.path.abspath("reports"))
+
+# static api route
+@app.route('/', methods=['GET'])
+def index():
+    return app.send_static_file('geocode_papers.html')
+
+@app.errorhandler(404)
+def page_not_found(e):
+    return app.send_static_file('geocode_papers.html')
+
+# route to get all the manually geocoded IDs (to dedupe)
+# route to add a geocoding for a paper
+
+@app.route('/api/institutions', methods=['GET'])
+def list_locations():
+    addresses = AddressBook()
+    return jsonify({
+        'entities': addresses.entities,
+        'lookup': addresses.lookup,
+    })
+
+@app.route('/api/papers', methods=['GET'])
+def list_papers():
+    lookup_keys, lines = fetch_google_sheet('citation_lookup')
+    paper_lookup = {}
+    for line in lines:
+        paper_lookup[line[0]] = line
+    return jsonify({
+        'papers': paper_lookup,
+    })
+
+@app.route('/api/address', methods=['POST'])
+def add_address():
+    # id, title, institution_1, institution_2, institution_3, institution_4, notes
+    locations_worksheet.insert_row([
+        request.form['paper_id'],
+        request.form['title'],
+        request.form['institution_1'],
+        request.form['institution_2'],
+        request.form['institution_3'],
+        request.form['institution_4'],
+        request.form['notes'],
+    ])
+    return jsonify({
+        'status': 'ok'
+    })
+
+if __name__ == "__main__":
+    app.run("0.0.0.0", debug=False)
+
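The new server exposes three small JSON endpoints on Flask's default port (5000). A sketch of driving them from Python: the field names come from add_address() above, while the example values and the requests dependency are assumptions, not part of the commit:

import requests

BASE = 'http://localhost:5000'

# Institutions already geocoded by hand, used by the report UI to dedupe.
institutions = requests.get(BASE + '/api/institutions').json()
print(len(institutions['lookup']), 'known institutions')

# Append a manual geocoding row to the paper_locations worksheet.
r = requests.post(BASE + '/api/address', data={
    'paper_id': '0297448f3ed948e136bb06ceff10eccb34e5bb77',  # example id taken from the report above
    'title': 'Imagery Library for Intelligent Detection Systems',
    'institution_1': 'Example University',  # placeholder value
    'institution_2': '',
    'institution_3': '',
    'institution_4': '',
    'notes': '',
})
print(r.json())  # {'status': 'ok'}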
diff --git a/scraper/s2-papers.py b/scraper/s2-papers.py
index 9a584e29..744454b7 100644
--- a/scraper/s2-papers.py
+++ b/scraper/s2-papers.py
@@ -23,8 +23,16 @@ def fetch_papers():
    no_location_rows = []
    nonmatching_rows = []
    for line in lines:
-        key, name, title, paper_id, is_unknown = line
+        # key, name, title, paper_id, is_unknown, notes = line
+        key = line[0]
+        name = line[1]
+        title = line[2]
+        paper_id = line[3]
+        if paper_id == '':
+            continue
        paper = fetch_paper(s2, paper_id)
+        if paper is None:
+            continue
        db_paper = load_paper(paper_id)
        pdf_link = db_paper.pdf_link if db_paper else ""
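The move from tuple unpacking to positional indexing keeps fetch_papers() working when sheet rows gain a trailing notes column or arrive with trailing cells missing. An alternative that preserves the unpacking style is to pad every row to a fixed width first; this is a hypothetical helper, not part of this commit:

def pad_row(line, width=6, fill=''):
    # Pad or trim a spreadsheet row so unpacking never raises ValueError.
    return (list(line) + [fill] * width)[:width]

key, name, title, paper_id, is_unknown, notes = pad_row(line)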
diff --git a/scraper/util.py b/scraper/util.py
index 2d7c2ccb..9b47510a 100644
--- a/scraper/util.py
+++ b/scraper/util.py
@@ -331,11 +331,11 @@ def fetch_paper(s2, paper_id):
    print(paper_id)
    paper = s2.paper(paper_id)
    if paper is None:
-        print("Got none paper??")
+        print("Paper not found: {}".format(paper_id))
        # time.sleep(random.randint(1, 2))
        paper = s2.paper(paper_id)
        if paper is None:
-            print("Paper not found")
+            # print("Paper not found")
            return None
    write_json(paper_fn, paper)
    # time.sleep(random.randint(1, 2))
@@ -343,7 +343,8 @@ def fetch_paper(s2, paper_id):
def fetch_spreadsheet():
    scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
-    credentials = ServiceAccountCredentials.from_json_keyfile_name('./.creds/Megapixels-ef28f91112a9.json', scope)
+    path = os.path.dirname(os.path.abspath(__file__))
+    credentials = ServiceAccountCredentials.from_json_keyfile_name(os.path.join(path, '.creds/Megapixels-ef28f91112a9.json'), scope)
    docid = "1denb7TjYsN9igHyvYah7fQ0daABW32Z30lwV7QrDJQc"
    client = gspread.authorize(credentials)
    spreadsheet = client.open_by_key(docid)
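Resolving the credentials file relative to __file__ means fetch_spreadsheet() no longer depends on the caller's working directory. The same pattern is worth reusing anywhere the scraper opens files that live next to its own source; a hypothetical helper, shown for illustration:

import os

def module_path(*parts):
    # Build a path relative to this source file, not the current working directory.
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), *parts)

# e.g. module_path('.creds', 'Megapixels-ef28f91112a9.json')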