Diffstat (limited to 'scraper/reports/paper_title_report_nonmatching.html')
-rw-r--r--  scraper/reports/paper_title_report_nonmatching.html  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/scraper/reports/paper_title_report_nonmatching.html b/scraper/reports/paper_title_report_nonmatching.html
index a9651307..79bf81ab 100644
--- a/scraper/reports/paper_title_report_nonmatching.html
+++ b/scraper/reports/paper_title_report_nonmatching.html
@@ -1,9 +1,9 @@
-<!doctype html><html><head><meta charset='utf-8'><title>Paper Titles that do not match</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Titles that do not match</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance">[s2]</a></td><td>Australian National University</td><td>b1f4423c227fa37b9680787be38857069247a307</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A New Database for Facial Expression, Valence, and Arousal Computation in the Wild</td><td>Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</td><td><a href="http://dl.acm.org/citation.cfm?id=3232665">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a new database for facial expression, valence, and arousal computation in the wild&sort=relevance">[s2]</a></td><td></td><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance">[s2]</a></td><td>University of California, Irvine</td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>bjut_3d</td><td>BJUT-3D</td><td>The BJUT-3D Large-Scale Chinese Face Database</td><td>A novel face recognition method based on 3D face model</td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the bjut-3d large-scale chinese face database&sort=relevance">[s2]</a></td><td></td><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>Brainwash dataset</td><td>Brainwash: A Data System for Feature Engineering</td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=brainwash dataset&sort=relevance">[s2]</a></td><td></td><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a benchmark&sort=relevance">[s2]</a></td><td></td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>camel</td><td>CAMEL</td><td>CAMEL Dataset for Visual and Thermal Infrared Multiple Object Detection and Tracking</td><td>Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=camel dataset for visual and thermal infrared multiple object detection and tracking&sort=relevance">[s2]</a></td><td></td><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>celeba_plus</td><td>CelebFaces+</td><td>Deep Learning Face Representation from Predicting 10,000 Classes</td><td>Learning Deep Representation for Imbalanced Classification</td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face representation from predicting 10,000 classes&sort=relevance">[s2]</a></td><td>Shenzhen Institutes of Advanced Technology</td><td>69a68f9cf874c69e2232f47808016c2736b90c35</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset&sort=relevance">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO Captions: Data Collection and Evaluation Server</td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance">[s2]</a></td><td></td><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>A 3D Morphable Eye Region Model for Gaze Estimation</td><td><a href="https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance">[s2]</a></td><td>Carnegie Mellon University</td><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance">[s2]</a></td><td>Jacobs University</td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>Fashion Landmark Detection in the Wild</td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance">[s2]</a></td><td>Chinese University of Hong Kong</td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance">[s2]</a></td><td></td><td>a5acda0e8c0937bfed013e6382da127103e41395</td></tr><tr><td>expw</td><td>ExpW</td><td>Learning Social Relation Traits from Face Images</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>face_research_lab</td><td>Face Research Lab London</td><td>Face Research Lab London Set. figshare</td><td>Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face research lab london set. figshare&sort=relevance">[s2]</a></td><td>University College London</td><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>Face swapping: automatically replacing faces in photographs</td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large collections of images with faces&sort=relevance">[s2]</a></td><td>Columbia University</td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>A Benchmark for Face Detection in Unconstrained Settings</td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance">[s2]</a></td><td>University of Massachusetts</td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>Generalização cartográfica automatizada para um banco de dados cadastral</td><td><a href="https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance">[s2]</a></td><td></td><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>2D and 3D face recognition: A survey</td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance">[s2]</a></td><td></td><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing Cosegmentation for Shopping Images With Cluttered Background</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance">[s2]</a></td><td></td><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td></tr><tr><td>gavab_db</td><td>Gavab</td><td>GavabDB: a 3D face database</td><td>Expression invariant 3D face recognition with a Morphable Model</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gavabdb: a 3d face database&sort=relevance">[s2]</a></td><td></td><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Object recognition using segmentation for feature detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Object recognition using segmentation for feature detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>High-resolution comprehensive 3-D dynamic database for facial articulation analysis</td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance">[s2]</a></td><td></td><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? a preliminary study of face-based recognition on a transgender dataset&sort=relevance">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Face recognition across gender transformation using SVM Classifier</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition across gender transformation using svm classifier&sort=relevance">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td></td><td>Imagery Library for Intelligent Detection Systems:
+<!doctype html><html><head><meta charset='utf-8'><title>Paper Titles that do not match</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Titles that do not match</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance">[s2]</a></td><td></td><td>b1f4423c227fa37b9680787be38857069247a307</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A New Database for Facial Expression, Valence, and Arousal Computation in the Wild</td><td>Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</td><td><a href="http://dl.acm.org/citation.cfm?id=3232665">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a new database for facial expression, valence, and arousal computation in the wild&sort=relevance">[s2]</a></td><td></td><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance">[s2]</a></td><td></td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>bjut_3d</td><td>BJUT-3D</td><td>The BJUT-3D Large-Scale Chinese Face Database</td><td>A novel face recognition method based on 3D face model</td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the bjut-3d large-scale chinese face database&sort=relevance">[s2]</a></td><td></td><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance">[s2]</a></td><td></td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>Brainwash dataset</td><td>Brainwash: A Data System for Feature Engineering</td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=brainwash dataset&sort=relevance">[s2]</a></td><td></td><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a benchmark&sort=relevance">[s2]</a></td><td></td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>camel</td><td>CAMEL</td><td>CAMEL Dataset for Visual and Thermal Infrared Multiple Object Detection and Tracking</td><td>Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=camel dataset for visual and thermal infrared multiple object detection and tracking&sort=relevance">[s2]</a></td><td></td><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>celeba_plus</td><td>CelebFaces+</td><td>Deep Learning Face Representation from Predicting 10,000 Classes</td><td>Learning Deep Representation for Imbalanced Classification</td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face representation from predicting 10,000 classes&sort=relevance">[s2]</a></td><td></td><td>69a68f9cf874c69e2232f47808016c2736b90c35</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset&sort=relevance">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO Captions: Data Collection and Evaluation Server</td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance">[s2]</a></td><td></td><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>A 3D Morphable Eye Region Model for Gaze Estimation</td><td><a href="https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance">[s2]</a></td><td></td><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance">[s2]</a></td><td></td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>Fashion Landmark Detection in the Wild</td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance">[s2]</a></td><td></td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance">[s2]</a></td><td></td><td>a5acda0e8c0937bfed013e6382da127103e41395</td></tr><tr><td>expw</td><td>ExpW</td><td>Learning Social Relation Traits from Face Images</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>face_research_lab</td><td>Face Research Lab London</td><td>Face Research Lab London Set. figshare</td><td>Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face research lab london set. figshare&sort=relevance">[s2]</a></td><td></td><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>Face swapping: automatically replacing faces in photographs</td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large collections of images with faces&sort=relevance">[s2]</a></td><td></td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>A Benchmark for Face Detection in Unconstrained Settings</td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance">[s2]</a></td><td></td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>Generalização cartográfica automatizada para um banco de dados cadastral</td><td><a href="https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance">[s2]</a></td><td></td><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance">[s2]</a></td><td></td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance">[s2]</a></td><td></td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>2D and 3D face recognition: A survey</td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance">[s2]</a></td><td></td><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing Cosegmentation for Shopping Images With Cluttered Background</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance">[s2]</a></td><td></td><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td></tr><tr><td>gavab_db</td><td>Gavab</td><td>GavabDB: a 3D face database</td><td>Expression invariant 3D face recognition with a Morphable Model</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gavabdb: a 3d face database&sort=relevance">[s2]</a></td><td></td><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Object recognition using segmentation for feature detection</td><td><a href="http://www.emt.tugraz.at/~tracking/Publications/fussenegger2004b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>High-resolution comprehensive 3-D dynamic database for facial articulation analysis</td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance">[s2]</a></td><td></td><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? a preliminary study of face-based recognition on a transgender dataset&sort=relevance">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Face recognition across gender transformation using SVM Classifier</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition across gender transformation using svm classifier&sort=relevance">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td></td><td>Imagery Library for Intelligent Detection Systems:
The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems:
-the i-lids user guide&sort=relevance">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identi cation by Video Ranking</td><td>Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</td><td><a href="https://doi.org/10.1109/LSP.2016.2574323">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identi cation by video ranking&sort=relevance">[s2]</a></td><td></td><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>imm_face</td><td>IMM Face Dataset</td><td>The IMM Face Database - An Annotated Dataset of 240 Face Images</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the imm face database - an annotated dataset of 240 face images&sort=relevance">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>A Synchronization Ground Truth for the Jiku Mobile Video Dataset</td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance">[s2]</a></td><td></td><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td></tr><tr><td>kdef</td><td>KDEF</td><td>The Karolinska Directed Emotional Faces – KDEF</td><td>Gaze fixation and the neural circuitry of face processing in autism</td><td><a href="http://doi.org/10.1038/nn1421">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the karolinska directed emotional faces – kdef&sort=relevance">[s2]</a></td><td></td><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>The Role of Machine Vision for Intelligent Vehicles</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance">[s2]</a></td><td></td><td>35ba4ebfd017a56b51e967105af9ae273c9b0178</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance">[s2]</a></td><td>University of Massachusetts</td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>lfw_a</td><td>LFW-a</td><td>Effective Unconstrained Face Recognition by
+the i-lids user guide&sort=relevance">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identi cation by Video Ranking</td><td>Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</td><td><a href="https://doi.org/10.1109/LSP.2016.2574323">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identi cation by video ranking&sort=relevance">[s2]</a></td><td></td><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>imm_face</td><td>IMM Face Dataset</td><td>The IMM Face Database - An Annotated Dataset of 240 Face Images</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the imm face database - an annotated dataset of 240 face images&sort=relevance">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>A Synchronization Ground Truth for the Jiku Mobile Video Dataset</td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance">[s2]</a></td><td></td><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td></tr><tr><td>kdef</td><td>KDEF</td><td>The Karolinska Directed Emotional Faces – KDEF</td><td>Gaze fixation and the neural circuitry of face processing in autism</td><td><a href="http://doi.org/10.1038/nn1421">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the karolinska directed emotional faces – kdef&sort=relevance">[s2]</a></td><td></td><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>The Role of Machine Vision for Intelligent Vehicles</td><td><a href="http://www.path.berkeley.edu/sites/default/files/my_folder_76/Pub_03.2016_Role.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance">[s2]</a></td><td></td><td>35ba4ebfd017a56b51e967105af9ae273c9b0178</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance">[s2]</a></td><td></td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>lfw_a</td><td>LFW-a</td><td>Effective Unconstrained Face Recognition by
Combining Multiple Descriptors and Learned
Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by
combining multiple descriptors and learned
- background statistics&sort=relevance">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance">[s2]</a></td><td>Chinese Academy of Sciences</td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><a href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial 
stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>muct</td><td>MUCT</td><td>The MUCT Landmarked Face Database</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the muct landmarked face database&sort=relevance">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</td><td><a href="http://dl.acm.org/citation.cfm?id=2337184">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance">[s2]</a></td><td></td><td>109df0e8e5969ddf01e073143e83599228a1163f</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classi cation</td><td>Summary of Research on Informant Accuracy in Network Data, 11 and on the Reverse Small World Problem</td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classi cation&sort=relevance">[s2]</a></td><td></td><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>Labeled 
Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>Large Variability Surveillance Camera Face Database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance">[s2]</a></td><td></td><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sheffield</td><td>Sheffield Face</td><td>Face Recognition: From Theory to Applications</td><td>Face Description with Local Binary Patterns: Application to Face Recognition</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition: from theory to applications&sort=relevance">[s2]</a></td><td></td><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>Learning Social Relation Traits from Face Images</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance">[s2]</a></td><td>Chinese University of Hong 
Kong</td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Learning to Predict Human Behavior in Crowded Scenes</td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance">[s2]</a></td><td></td><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database:
+ background statistics&sort=relevance">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance">[s2]</a></td><td></td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><a href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance">[s2]</a></td><td></td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera 
tracking&sort=relevance">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>muct</td><td>MUCT</td><td>The MUCT Landmarked Face Database</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the muct landmarked face database&sort=relevance">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</td><td><a href="http://dl.acm.org/citation.cfm?id=2337184">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance">[s2]</a></td><td></td><td>109df0e8e5969ddf01e073143e83599228a1163f</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classi cation</td><td>Summary of Research on Informant Accuracy in Network Data, 11 and on the Reverse Small World Problem</td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classi cation&sort=relevance">[s2]</a></td><td></td><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>Large Variability Surveillance Camera Face Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance">[s2]</a></td><td></td><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sheffield</td><td>Sheffield Face</td><td>Face Recognition: From Theory to Applications</td><td>Face Description with Local Binary Patterns: Application to Face Recognition</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition: from theory to applications&sort=relevance">[s2]</a></td><td></td><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>Learning Social Relation Traits 
from Face Images</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance">[s2]</a></td><td></td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Learning to Predict Human Behavior in Crowded Scenes</td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance">[s2]</a></td><td></td><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database:
Discovering, Annotating, and Recognizing Scene Attributes</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sun attribute database:
-discovering, annotating, and recognizing scene attributes&sort=relevance">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes Challenge: A Retrospective</td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance">[s2]</a></td><td></td><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? 
approaches to mapping facial appearance diversity&sort=relevance">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>The New Data and New Challenges in Multimedia Research</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance">[s2]</a></td><td></td><td>a6e695ddd07aad719001c0fc1129328452385949</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr></table></body></html> \ No newline at end of file
+discovering, annotating, and recognizing scene attributes&sort=relevance">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes Challenge: A Retrospective</td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance">[s2]</a></td><td></td><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? 
approaches to mapping facial appearance diversity&sort=relevance">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>The New Data and New Challenges in Multimedia Research</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance">[s2]</a></td><td></td><td>a6e695ddd07aad719001c0fc1129328452385949</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance">[s2]</a></td><td></td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr></table></body></html> \ No newline at end of file