<!-- removed stray extraction artifacts (1, 2, 3, |) that preceded the doctype and would trigger quirks-mode parsing -->
<!doctype html><html lang="en"><head><meta charset='utf-8'><title>Paper Titles that do not match</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Titles that do not match</h2><table border='1' cellpadding='3' cellspacing='3'><tr><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th></tr><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>A 3 D Dynamic Database for Unconstrained Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose 
estimation, and landmark localization in the wild</td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance" target="_blank">[s2]</a></td><td>University of California, Irvine</td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance" target="_blank">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>bjut_3d</td><td>BJUT-3D</td><td>The BJUT-3D Large-Scale Chinese Face Database</td><td>A novel face recognition method based on 3D face model</td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=the bjut-3d large-scale chinese face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance" target="_blank">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>Brainwash dataset</td><td>Brainwash: A Data System for Feature Engineering</td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=brainwash dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td></tr><tr><td>camel</td><td>CAMEL</td><td>CAMEL Dataset for Visual and Thermal Infrared Multiple Object Detection and Tracking</td><td>Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=camel dataset for visual and thermal infrared multiple object detection and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>Gaze locking: passive eye contact detection for human-object interaction</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance" target="_blank">[s2]</a></td><td></td><td>06f02199690961ba52997cde1527e714d2b3bf8f</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian 
Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance" target="_blank">[s2]</a></td><td>Jacobs University</td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance" target="_blank">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a5acda0e8c0937bfed013e6382da127103e41395</td></tr><tr><td>face_research_lab</td><td>Face Research Lab London</td><td>Face Research Lab London Set. figshare</td><td>Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face research lab london set. figshare&sort=relevance" target="_blank">[s2]</a></td><td>University College London</td><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>A Benchmark for Face Detection in Unconstrained Settings</td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>Generalização cartográfica automatizada para um banco de dados cadastral</td><td><a href="https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance" target="_blank">[s2]</a></td><td></td><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a 
href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance" target="_blank">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>gavab_db</td><td>Gavab</td><td>GavabDB: a 3D face database</td><td>Expression invariant 3D face recognition with a Morphable Model</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gavabdb: a 3d face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. 
Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance" target="_blank">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>High-resolution comprehensive 3-D dynamic database for facial articulation analysis</td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Face recognition across gender transformation using SVM Classifier</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=face recognition across gender transformation using svm classifier&sort=relevance" target="_blank">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance" target="_blank">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td></td><td>Imagery Library for Intelligent Detection Systems: The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems: the i-lids user guide&sort=relevance" target="_blank">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identication by Video Ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=person re-identication by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance" target="_blank">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>imm_face</td><td>IMM Face Dataset</td><td>The IMM Face Database - An Annotated Dataset of 240 Face Images</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the imm face database - an annotated dataset of 240 face images&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>kdef</td><td>KDEF</td><td>The Karolinska Directed Emotional Faces – KDEF</td><td>Gaze fixation and the neural circuitry of face processing in autism</td><td><a href="http://doi.org/10.1038/nn1421" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the karolinska directed emotional faces – kdef&sort=relevance" target="_blank">[s2]</a></td><td></td><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>The Role of Machine Vision for Intelligent Vehicles</td><td><a href="http://www.path.berkeley.edu/sites/default/files/my_folder_76/Pub_03.2016_Role.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: 
the kitti dataset&sort=relevance" target="_blank">[s2]</a></td><td></td><td>35ba4ebfd017a56b51e967105af9ae273c9b0178</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance" target="_blank">[s2]</a></td><td>University of Massachusetts</td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance" target="_blank">[s2]</a></td><td>Chinese Academy of Sciences</td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><a href="https://doi.org/10.1007/s11042-012-1352-1" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance" target="_blank">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Learning to 
associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0633.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance" target="_blank">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>muct</td><td>MUCT</td><td>The 
MUCT Landmarked Face Database</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the muct landmarked face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</td><td><a href="http://dl.acm.org/citation.cfm?id=2337184" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance" target="_blank">[s2]</a></td><td></td><td>109df0e8e5969ddf01e073143e83599228a1163f</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance" target="_blank">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance" target="_blank">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classication</td><td>Summary of Research on Informant Accuracy in Network Data, 11 and on the Reverse Small World Problem</td><td><a 
href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classication&sort=relevance" target="_blank">[s2]</a></td><td></td><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>Large Variability Surveillance Camera Face Database</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance" target="_blank">[s2]</a></td><td></td><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sheffield</td><td>Sheffield Face</td><td>Face Recognition: From Theory to Applications</td><td>Face Description with Local Binary Patterns: Application to Face Recognition</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244" target="_blank">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=face recognition: from theory to applications&sort=relevance" target="_blank">[s2]</a></td><td></td><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Social LSTM: Human Trajectory Prediction in Crowded Spaces</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" 
target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database:
Discovering, Annotating, and Recognizing Scene Attributes</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sun attribute database:
discovering, annotating, and recognizing scene attributes&sort=relevance" target="_blank">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance" target="_blank">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><a href="https://arxiv.org/pdf/1705.07426.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance" target="_blank">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance" target="_blank">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? 
Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? approaches to mapping facial appearance diversity&sort=relevance" target="_blank">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf" target="_blank">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance" target="_blank">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr></table></body></html>
<!-- removed stray extraction artifact (|) that trailed the closing html tag -->