author     adamhrv <adam@ahprojects.com>  2019-02-09 14:02:22 +0100
committer  adamhrv <adam@ahprojects.com>  2019-02-09 14:02:22 +0100
commit     31305e9d9de9b7624cb9b2dfb462a3e68c120798 (patch)
tree       51b37499dc21bb600639d9a3e6050a5d01e2dbd1
parent     edc5e1542071fdc1a18a2bb1af2c2b5bed8be02a (diff)
parent     865be13c0d7e22db4f23f1d4dddc381e7392fe55 (diff)
Merge branch 'master' of github.com:adamhrv/megapixels_dev
-rw-r--r--  README.md                                              2
-rw-r--r--  client/splash/index.js                                 3
-rw-r--r--  client/splash/modal.js                                19
-rw-r--r--  scraper/README.md                                      4
-rw-r--r--  scraper/datasets/citation_lookup.csv                 292
-rw-r--r--  scraper/pdf_dump_all.sh                               20
-rw-r--r--  scraper/pdf_dump_first_page.sh                         8
-rw-r--r--  scraper/reports/paper_title_report.html                9
-rw-r--r--  scraper/reports/paper_title_report_no_location.html    9
-rw-r--r--  scraper/reports/paper_title_report_nonmatching.html    9
-rw-r--r--  scraper/s2-fetch-pdf.py                                2
-rw-r--r--  scraper/s2-final-report.py                           202
-rw-r--r--  scraper/s2-papers.py                                  88
-rw-r--r--  scraper/s2-pdf-first-pages.py                          2
-rw-r--r--  scraper/s2-search.py                                  18
-rw-r--r--  scraper/util.py                                       47
-rw-r--r--  site/assets/css/css.css                                2
-rw-r--r--  site/assets/css/splash.css                            63
-rw-r--r--  site/assets/demo/splash/index.html                     2
-rw-r--r--  site/datasets/final/10k_US_adult_faces.json            1
-rw-r--r--  site/datasets/final/feret.json                         1
-rw-r--r--  site/datasets/final/ijb_c.json                         1
-rw-r--r--  site/datasets/final/images_of_groups.json              1
-rw-r--r--  site/datasets/final/imdb_wiki.json                     1
-rw-r--r--  site/datasets/final/lfw.json                           1
-rw-r--r--  site/datasets/final/megaface.json                      1
-rw-r--r--  site/datasets/final/umd_faces.json                     1
-rw-r--r--  site/datasets/final/vgg_faces2.json                    1
28 files changed, 742 insertions, 68 deletions
diff --git a/README.md b/README.md
index 0348562f..e1a2c1d0 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,7 @@ python cli_site.py build
## Running the site
-On OSX, you must run `pythonw` to use matplotlib.
+On OSX, you must run the server with `pythonw` because of matplotlib.
```
python cli_flask.py run
```
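The `pythonw` requirement comes from matplotlib's GUI backend needing a macOS framework build of Python. If the server only renders images to disk, a common alternative is to select a non-GUI backend before pyplot is imported; a minimal sketch, assuming cli_flask.py never opens plot windows (an illustration, not what the repo does):

```
# Hypothetical top of cli_flask.py: with the non-GUI "Agg" backend selected
# before pyplot is imported, matplotlib no longer needs the macOS framework
# build, so plain `python` works where `pythonw` was required.
import matplotlib
matplotlib.use("Agg")  # must run before any `import matplotlib.pyplot`
import matplotlib.pyplot as plt
```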
diff --git a/client/splash/index.js b/client/splash/index.js
index 322ed0ff..e247b7f5 100644
--- a/client/splash/index.js
+++ b/client/splash/index.js
@@ -30,8 +30,9 @@ function build() {
}
function bind() {
+ document.querySelector('.slogan').addEventListener('click', modal.close)
toArray(document.querySelectorAll('.aboutLink')).forEach(el => {
- el.addEventListener('click', modal.open)
+ el.addEventListener('click', modal.toggle)
})
document.querySelector('.about .inner').addEventListener('click', e => e.stopPropagation())
document.querySelector('.about').addEventListener('click', modal.close)
diff --git a/client/splash/modal.js b/client/splash/modal.js
index d5a63d75..47d26c06 100644
--- a/client/splash/modal.js
+++ b/client/splash/modal.js
@@ -1,10 +1,25 @@
-export function open() {
+let isOpen = false
+
+export function toggle(e) {
+ if (isOpen) close(e)
+ else open(e)
+}
+
+export function open(e) {
+ if (e) e.preventDefault()
+ if (isOpen) return
const el = document.querySelector('.about')
+ document.body.classList.add('modalOpen')
el.classList.add('open')
+ isOpen = true
}
-export function close() {
+export function close(e) {
+ if (e) e.preventDefault()
+ if (!isOpen) return
const el = document.querySelector('.about')
+ document.body.classList.remove('modalOpen')
el.classList.remove('open')
+ isOpen = false
}
diff --git a/scraper/README.md b/scraper/README.md
index 4399abd3..33b2d975 100644
--- a/scraper/README.md
+++ b/scraper/README.md
@@ -42,6 +42,10 @@ We do a two-stage fetch process as only about 66% of their papers are in this da
Loads titles from citations file and queries the S2 search API to get paper IDs, then uses the paper IDs from the search entries to query the S2 papers API to get first-degree citations, authors, etc.
+### s2-papers.py
+
+Searching is not always accurate, so run the s2-papers.py script to build a report of all the papers and correct any that did not resolve. The report also lists papers without a location.
+
### s2-dump-ids.py
Dump all the paper IDs and citation IDs from the queried papers.
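As a sketch of stage two of the fetch described above (assumptions: the `requests` library and the public endpoint `https://api.semanticscholar.org/v1/paper/<id>`; the repo's scripts may call different endpoints), once stage one has resolved a title to a paper ID, the paper record with first-degree citations and authors can be pulled like this:

```
# Sketch, not the repo's code: fetch one resolved paper from the S2 v1 API.
import requests

def fetch_paper(paper_id):
    url = "https://api.semanticscholar.org/v1/paper/" + paper_id
    resp = requests.get(url, timeout=30)
    resp.raise_for_status()
    data = resp.json()
    return {
        "title": data.get("title"),
        "authors": [a["name"] for a in data.get("authors", [])],
        "citations": [c.get("paperId") for c in data.get("citations", [])],
    }

# Paper ID taken from citation_lookup.csv below (10K US Adult Faces)
info = fetch_paper("8b2dd5c61b23ead5ae5508bb8ce808b5ea266730")
print(info["title"], len(info["citations"]))
```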
diff --git a/scraper/datasets/citation_lookup.csv b/scraper/datasets/citation_lookup.csv
new file mode 100644
index 00000000..d48c1025
--- /dev/null
+++ b/scraper/datasets/citation_lookup.csv
@@ -0,0 +1,292 @@
+key,name,title,paper_id
+10k_US_adult_faces,10K US Adult Faces,The intrinsic memorability of face images,8b2dd5c61b23ead5ae5508bb8ce808b5ea266730
+3d_rma,3D-RMA,Automatic 3D Face Authentication,2160788824c4c29ffe213b2cbeb3f52972d73f37
+3dddb_unconstrained,3D Dynamic,A 3D Dynamic Database for Unconstrained Face Recognition,370b5757a5379b15e30d619e4d3fb9e8e13f3256
+3dpes,3DPeS,3DPes: 3D People Dataset for Surveillance and Forensics,2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e
+4dfab,4DFAB,4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications,9696ad8b164f5e10fcfe23aacf74bd6168aebb15
+50_people_one_question,50 People One Question,Merging Pose Estimates Across Space and Time,5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725
+a_pascal_yahoo,aPascal,Describing Objects by their Attributes,2e384f057211426ac5922f1b33d2aa8df5d51f57
+adience,Adience,Age and Gender Estimation of Unfiltered Faces,1be498d4bbc30c3bfd0029114c784bc2114d67c0
+afad,AFAD,Ordinal Regression with a Multiple Output CNN for Age Estimation,6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c
+afew_va,AFEW-VA,AFEW-VA database for valence and arousal estimation in-the-wild,b1f4423c227fa37b9680787be38857069247a307
+afew_va,AFEW-VA,"Collecting Large, Richly Annotated Facial-Expression Databases from Movies",b1f4423c227fa37b9680787be38857069247a307
+affectnet,AffectNet,"AffectNet: A New Database for Facial Expression, Valence, and Arousal Computation in the Wild",f152b6ee251cca940dd853c54e6a7b78fbc6b235
+aflw,AFLW,"Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization",a74251efa970b92925b89eeef50a5e37d9281ad0
+afw,AFW,"Face detection, pose estimation and landmark localization in the wild",0e986f51fe45b00633de9fd0c94d082d2be51406
+agedb,AgeDB,"AgeDB: the first manually collected, in-the-wild age database",6dcf418c778f528b5792104760f1fbfe90c6dd6a
+alert_airport,ALERT Airport,"A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets",6403117f9c005ae81f1e8e6d1302f4a045e3d99d
+am_fed,AM-FED,Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”,47aeb3b82f54b5ae8142b4bdda7b614433e69b9a
+apis,APiS1.0,Pedestrian Attribute Classification in Surveillance: Database and Evaluation,488e475eeb3bb39a145f23ede197cd3620f1d98a
+ar_facedb,AR Face,The AR Face Database,370b5757a5379b15e30d619e4d3fb9e8e13f3256
+awe_ears,AWE Ears,Ear Recognition: More Than a Survey,84fe5b4ac805af63206012d29523a1e033bc827e
+b3d_ac,B3D(AC),A 3-D Audio-Visual Corpus of Affective Communication,d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae
+bbc_pose,BBC Pose,Automatic and Efficient Human Pose Estimation for Sign Language Videos,213a579af9e4f57f071b884aa872651372b661fd
+berkeley_pose,BPAD,Describing People: A Poselet-Based Approach to Attribute Classification,7808937b46acad36e43c30ae4e9f3fd57462853d
+bfm,BFM,A 3D Face Model for Pose and Illumination Invariant Face Recognition,639937b3a1b8bded3f7e9a40e85bd3770016cf3c
+bio_id,BioID Face,Robust Face Detection Using the Hausdorff Distance,4053e3423fb70ad9140ca89351df49675197196a
+bjut_3d,BJUT-3D,The BJUT-3D Large-Scale Chinese Face Database,1ed1a49534ad8dd00f81939449f6389cfbc25321
+bosphorus,The Bosphorus,Bosphorus Database for 3D Face Analysis,2acf7e58f0a526b957be2099c10aab693f795973
+bp4d_plus,BP4D+,Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis,53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4
+bp4d_spontanous,BP4D-Spontanous,A high resolution spontaneous 3D dynamic facial expression database,b91f54e1581fbbf60392364323d00a0cd43e493c
+brainwash,Brainwash,Brainwash dataset,214c966d1f9c2a4b66f4535d9a0d4078e63a5867
+bu_3dfe,BU-3DFE,A 3D Facial Expression Database For Facial Behavior Research,cc589c499dcf323fe4a143bbef0074c3e31f9b60
+buhmap_db,BUHMAP-DB ,Facial Feature Tracking and Expression Recognition for Sign Language,014b8df0180f33b9fea98f34ae611c6447d761d2
+cafe,CAFE,The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults,20388099cc415c772926e47bcbbe554e133343d1
+caltech_10k_web_faces,Caltech 10K Web Faces,Pruning Training Sets for Learning of Object Categories,636b8ffc09b1b23ff714ac8350bb35635e49fa3c
+caltech_pedestrians,Caltech Pedestrians,Pedestrian Detection: A Benchmark,f72f6a45ee240cc99296a287ff725aaa7e7ebb35
+caltech_pedestrians,Caltech Pedestrians,Pedestrian Detection: An Evaluation of the State of the Art,f72f6a45ee240cc99296a287ff725aaa7e7ebb35
+camel,CAMEL,CAMEL Dataset for Visual and Thermal Infrared Multiple Object Detection and Tracking,5801690199c1917fa58c35c3dead177c0b8f9f2d
+cas_peal,CAS-PEAL,The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations,2485c98aa44131d1a2f7d1355b1e372f2bb148ad
+casablanca,Casablanca,Context-aware {CNNs} for person head detection,0ceda9dae8b9f322df65ca2ef02caca9758aec6f
+casia_webface,CASIA Webface,Learning Face Representation from Scratch,853bd61bc48a431b9b1c7cab10c603830c488e39
+celeba,CelebA,Deep Learning Face Attributes in the Wild,6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4
+celeba_plus,CelebFaces+,"Deep Learning Face Representation from Predicting 10,000 Classes",69a68f9cf874c69e2232f47808016c2736b90c35
+cfd,CFD,The Chicago face database: A free stimulus set of faces and norming data,4df3143922bcdf7db78eb91e6b5359d6ada004d2
+chalearn,ChaLearn,ChaLearn Looking at People: A Review of Events and Resources,8d5998cd984e7cce307da7d46f155f9db99c6590
+chokepoint,ChokePoint,Patch-based Probabilistic Image Quality Assessment for Face Selection and Improved Video-based Face Recognition,0486214fb58ee9a04edfe7d6a74c6d0f661a7668
+cityscapes,Cityscapes,The Cityscapes Dataset for Semantic Urban Scene Understanding,32cde90437ab5a70cf003ea36f66f2de0e24b3ab
+cityscapes,Cityscapes,The Cityscapes Dataset,32cde90437ab5a70cf003ea36f66f2de0e24b3ab
+clothing_co_parsing,CCP,Clothing Co-Parsing by Joint Image Segmentation and Labeling,2bf8541199728262f78d4dced6fb91479b39b738
+cmdp,CMDP,Distance Estimation of an Unknown Person from a Portrait,56ae6d94fc6097ec4ca861f0daa87941d1c10b70
+cmu_pie,CMU PIE,"The CMU Pose, Illumination, and Expression Database",4d423acc78273b75134e2afd1777ba6d3a398973
+coco,COCO,Microsoft COCO: Common Objects in Context,696ca58d93f6404fea0fc75c62d1d7b378f47628
+coco_action,COCO-a,Describing Common Human Visual Actions in Images,4946ba10a4d5a7d0a38372f23e6622bd347ae273
+coco_qa,COCO QA,Exploring Models and Data for Image Question Answering,35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62
+cofw,COFW,Robust face landmark estimation under occlusion,2724ba85ec4a66de18da33925e537f3902f21249
+cohn_kanade,CK,Comprehensive Database for Facial Expression Analysis,23fc83c8cfff14a16df7ca497661264fc54ed746
+cohn_kanade_plus,CK+,The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression,4d9a02d080636e9666c4d1cc438b9893391ec6c7
+columbia_gaze,Columbia Gaze,Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction,c34532fe6bfbd1e6df477c9ffdbb043b77e7804d
+complex_activities,Ongoing Complex Activities,Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space,65355cbb581a219bd7461d48b3afd115263ea760
+cuhk01,CUHK01,Human Reidentification with Transferred Metric Learning,44484d2866f222bbb9b6b0870890f9eea1ffb2d0
+cuhk02,CUHK02,Locally Aligned Feature Transforms across Views,38b55d95189c5e69cf4ab45098a48fba407609b4
+cuhk03,CUHK03,DeepReID: Deep Filter Pairing Neural Network for Person Re-identification,6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3
+cvc_01_barcelona,CVC-01,Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection,57fe081950f21ca03b5b375ae3e84b399c015861
+czech_news_agency,UFI,Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions,4b4106614c1d553365bad75d7866bff0de6056ed
+d3dfacs,D3DFACS,A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling,070de852bc6eb275d7ca3a9cdde8f6be8795d1a3
+dartmouth_children,Dartmouth Children,The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set,4e6ee936eb50dd032f7138702fa39b7c18ee8907
+data_61,Data61 Pedestrian,A Multi-Modal Graphical Model for Scene Analysis,563c940054e4b456661762c1ab858e6f730c3159
+deep_fashion,DeepFashion,DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations,4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7
+deep_fashion,DeepFashion,Fashion Landmark Detection in the Wild,4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7
+disfa,DISFA,DISFA: A Spontaneous Facial Action Intensity Database,a5acda0e8c0937bfed013e6382da127103e41395
+distance_nighttime,Long Distance Heterogeneous Face,Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching,4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06
+duke_mtmc,Duke MTMC,"Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking",27a2fad58dd8727e280f97036e0d2bc55ef5424c
+emotio_net,EmotioNet Database,"EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild",c900e0ad4c95948baaf0acd8449fde26f9b4952a
+eth_andreas_ess,ETHZ Pedestrian,Depth and Appearance for Mobile Scene Analysis,13f06b08f371ba8b5d31c3e288b4deb61335b462
+europersons,EuroCity Persons,The EuroCity Persons Dataset: A Novel Benchmark for Object Detection,f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4
+expw,ExpW,Learning Social Relation Traits from Face Images,22f656d0f8426c84a33a267977f511f127bfd7f3
+expw,ExpW,From Facial Expression Recognition to Interpersonal Relation Prediction,22f656d0f8426c84a33a267977f511f127bfd7f3
+face_research_lab,Face Research Lab London,Face Research Lab London Set. figshare,c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8
+face_scrub,FaceScrub,A data-driven approach to cleaning large face datasets,0d3bb75852098b25d90f31d2f48fd0cb4944702b
+face_tracer,FaceTracer,FaceTracer: A Search Engine for Large Collections of Images with Faces,670637d0303a863c1548d5b19f705860a23e285c
+face_tracer,FaceTracer,Face Swapping: Automatically Replacing Faces in Photographs,670637d0303a863c1548d5b19f705860a23e285c
+facebook_100,Facebook100,Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook,9c23859ec7313f2e756a3e85575735e0c52249f4
+faceplace,Face Place,Recognizing disguised faces,25474c21613607f6bb7687a281d5f9d4ffa1f9f3
+families_in_the_wild,FIW,Visual Kinship Recognition of Families in the Wild,dd65f71dac86e36eecbd3ed225d016c3336b4a13
+fddb,FDDB,FDDB: A Benchmark for Face Detection in Unconstrained Settings,75da1df4ed319926c544eefe17ec8d720feef8c0
+fei,FEI,Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro,b6b1b0632eb9d4ab1427278f5e5c46f97753c73d
+feret,FERET,The FERET Verification Testing Protocol for Face Recognition Algorithms,0c4a139bb87c6743c7905b29a3cfec27a5130652
+feret,FERET,The FERET database and evaluation procedure for face-recognition algorithms,dc8b25e35a3acb812beb499844734081722319b4
+feret,FERET,FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results,31de9b3dd6106ce6eec9a35991b2b9083395fd0b
+feret,FERET,The FERET Evaluation Methodology for Face-Recognition Algorithms,0f0fcf041559703998abf310e56f8a2f90ee6f21
+ferplus,FER+,Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution,298cbc3dfbbb3a20af4eed97906650a4ea1c29e0
+fia,CMU FiA,The CMU Face In Action (FIA) Database,47662d1a368daf70ba70ef2d59eb6209f98b675d
+fiw_300,300-W,300 faces In-the-wild challenge: Database and results,013909077ad843eb6df7a3e8e290cfd5575999d2
+fiw_300,300-W,300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge,013909077ad843eb6df7a3e8e290cfd5575999d2
+fiw_300,300-W,A semi-automatic methodology for facial landmark annotation,013909077ad843eb6df7a3e8e290cfd5575999d2
+frav3d,FRAV3D,"MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION",2f5d44dc3e1b5955942133ff872ebd31716ec604
+frgc,FRGC,Overview of the Face Recognition Grand Challenge,18ae7c9a4bbc832b8b14bc4122070d7939f5e00e
+gallagher,Gallagher,Clothing Cosegmentation for Recognizing People,6dbe8e5121c534339d6e41f8683e85f87e6abf81
+gavab_db,Gavab,GavabDB: a 3D face database,42505464808dfb446f521fc6ff2cfeffd4d68ff1
+geofaces,GeoFaces,GeoFaceExplorer: Exploring the Geo-Dependence of Facial Attributes,17b46e2dad927836c689d6787ddb3387c6159ece
+georgia_tech_face_database,Georgia Tech Face,Maximum likelihood training of the embedded HMM for face detection and recognition,3dc3f0b64ef80f573e3a5f96e456e52ee980b877
+graz,Graz Pedestrian,Generic Object Recognition with Boosting,12ad3b5bbbf407f8e54ea692c07633d1a867c566
+graz,Graz Pedestrian,Weak Hypotheses and Boosting for Generic Object Detection and Recognition,12ad3b5bbbf407f8e54ea692c07633d1a867c566
+graz,Graz Pedestrian,Object Recognition Using Segmentation for Feature Detection,12ad3b5bbbf407f8e54ea692c07633d1a867c566
+h3d,H3D,Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations,2830fb5282de23d7784b4b4bc37065d27839a412
+hda_plus,HDA+,The HDA+ data set for research on fully automated re-identification systems,bd88bb2e4f351352d88ee7375af834360e223498
+hda_plus,HDA+,A Multi-camera video data set for research on High-Definition surveillance,bd88bb2e4f351352d88ee7375af834360e223498
+helen,Helen,Interactive Facial Feature Localization,95f12d27c3b4914e0668a268360948bce92f7db3
+hi4d_adsip,Hi4D-ADSIP,Hi4D-ADSIP 3-D dynamic facial articulation database,24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd
+hipsterwars,Hipsterwars,Hipster Wars: Discovering Elements of Fashion Styles,04c2cda00e5536f4b1508cbd80041e9552880e67
+hollywood_headset,HollywoodHeads,Context-aware CNNs for person head detection,0ceda9dae8b9f322df65ca2ef02caca9758aec6f
+hrt_transgender,HRT Transgender,Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset,28312c3a47c1be3a67365700744d3d6665b86f22
+hrt_transgender,HRT Transgender,Investigating the Periocular-Based Face Recognition Across Gender Transformation,28312c3a47c1be3a67365700744d3d6665b86f22
+hrt_transgender,HRT Transgender,Face recognition across gender transformation using SVM Classifier,28312c3a47c1be3a67365700744d3d6665b86f22
+ifad,IFAD,Indian Face Age Database: A Database for Face Recognition with Age Variation,55c40cbcf49a0225e72d911d762c27bb1c2d14aa
+ifdb,IFDB,"Iranian Face Database with age, pose and expression",066d71fcd997033dce4ca58df924397dfe0b5fd1
+ifdb,IFDB,Iranian Face Database and Evaluation with a New Detection Algorithm,066d71fcd997033dce4ca58df924397dfe0b5fd1
+iit_dehli_ear,IIT Dehli Ear,Automated human identification using ear imaging,faf40ce28857aedf183e193486f5b4b0a8c478a2
+ijb_a,IJB-A,Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A,140c95e53c619eac594d70f6369f518adfea12ef
+ijb_b,IJB-B,IARPA Janus Benchmark-B Face Dataset,0cb2dd5f178e3a297a0c33068961018659d0f443
+ijb_c,IJB-C,IARPA Janus Benchmark C,57178b36c21fd7f4529ac6748614bb3374714e91
+ilids_mcts,,"Imagery Library for Intelligent Detection Systems:
+The i-LIDS User Guide",0297448f3ed948e136bb06ceff10eccb34e5bb77
+ilids_vid_reid,iLIDS-VID,Person Re-Identification by Video Ranking,99eb4cea0d9bc9fe777a5c5172f8638a37a7f262
+images_of_groups,Images of Groups,Understanding Groups of Images of People,21d9d0deed16f0ad62a4865e9acf0686f4f15492
+imdb_wiki,IMDB,Deep expectation of real and apparent age from a single image without facial landmarks,10195a163ab6348eef37213a46f60a3d87f289c5
+imdb_wiki,IMDB,DEX: Deep EXpectation of apparent age from a single image,8355d095d3534ef511a9af68a3b2893339e3f96b
+imfdb,IMFDB,Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations,ca3e88d87e1344d076c964ea89d91a75c417f5ee
+imm_face,IMM Face Dataset,The IMM Face Database - An Annotated Dataset of 240 Face Images,a74251efa970b92925b89eeef50a5e37d9281ad0
+immediacy,Immediacy,Multi-task Recurrent Neural Network for Immediacy Prediction,1e3df3ca8feab0b36fd293fe689f93bb2aaac591
+imsitu,imSitu,Situation Recognition: Visual Semantic Role Labeling for Image Understanding,51eba481dac6b229a7490f650dff7b17ce05df73
+inria_person,INRIA Pedestrian,Histograms of Oriented Gradients for Human Detection,10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5
+jaffe,JAFFE,Coding Facial Expressions with Gabor Wavelets,45c31cde87258414f33412b3b12fc5bec7cb3ba9
+jiku_mobile,Jiku Mobile Video Dataset,The Jiku Mobile Video Dataset,ad62c6e17bc39b4dec20d32f6ac667ae42d2c118
+jpl_pose,JPL-Interaction dataset,First-Person Activity Recognition: What Are They Doing to Me?,1aad2da473888cb7ebc1bfaa15bfa0f1502ce005
+kdef,KDEF,The Karolinska Directed Emotional Faces – KDEF,93884e46c49f7ae1c7c34046fbc28882f2bd6341
+kin_face,UB KinFace,Genealogical Face Recognition based on UB KinFace Database,08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7
+kin_face,UB KinFace,Kinship Verification through Transfer Learning,08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7
+kin_face,UB KinFace,Understanding Kin Relationships in a Photo,08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7
+kinectface,KinectFaceDB,KinectFaceDB: A Kinect Database for Face Recognition,0b440695c822a8e35184fb2f60dcdaa8a6de84ae
+kitti,KITTI,Vision meets Robotics: The KITTI Dataset,35ba4ebfd017a56b51e967105af9ae273c9b0178
+lag,LAG,Large Age-Gap Face Verification by Feature Injection in Deep Networks,0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e
+large_scale_person_search,Large Scale Person Search,End-to-End Deep Learning for Person Search,2161f6b7ee3c0acc81603b01dc0df689683577b9
+leeds_sports_pose,Leeds Sports Pose,Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation,4b1d23d17476fcf78f4cbadf69fb130b1aa627c0
+leeds_sports_pose_extended,Leeds Sports Pose Extended,Learning Effective Human Pose Estimation from Inaccurate Annotation,4e4746094bf60ee83e40d8597a6191e463b57f76
+lfw,LFW,Labeled Faces in the Wild: A Survey,7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22
+lfw,LFW,Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments,370b5757a5379b15e30d619e4d3fb9e8e13f3256
+lfw,LFW,Labeled Faces in the Wild: Updates and New Reporting Procedures,2d3482dcff69c7417c7b933f22de606a0e8e42d4
+lfw_a,LFW-a,"Effective Unconstrained Face Recognition by
+ Combining Multiple Descriptors and Learned
+ Background Statistics",133f01aec1534604d184d56de866a4bd531dac87
+lfw_p,LFWP,Localizing Parts of Faces Using a Consensus of Exemplars,140438a77a771a8fb656b39a78ff488066eb6b50
+m2vts,m2vts,The M2VTS Multimodal Face Database (Release 1.00),2485c98aa44131d1a2f7d1355b1e372f2bb148ad
+m2vtsdb_extended,xm2vtsdb,XM2VTSDB: The Extended M2VTS Database,370b5757a5379b15e30d619e4d3fb9e8e13f3256
+mafl,MAFL,Facial Landmark Detection by Deep Multi-task Learning,a0fd85b3400c7b3e11122f44dc5870ae2de9009a
+mafl,MAFL,Learning Deep Representation for Face Alignment with Auxiliary Attributes,a0fd85b3400c7b3e11122f44dc5870ae2de9009a
+malf,MALF,Fine-grained Evaluation on Face Detection in the Wild.,45e616093a92e5f1e61a7c6037d5f637aa8964af
+mapillary,Mapillary,The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes,79828e6e9f137a583082b8b5a9dfce0c301989b8
+market_1501,Market 1501,Scalable Person Re-identification: A Benchmark,4308bd8c28e37e2ed9a3fcfe74d5436cce34b410
+market1203,Market 1203,Orientation Driven Bag of Appearances for Person Re-identification,a7fe834a0af614ce6b50dc093132b031dd9a856b
+mars,MARS,MARS: A Video Benchmark for Large-Scale Person Re-identification,c0387e788a52f10bf35d4d50659cfa515d89fbec
+mcgill,McGill Real World,Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos,c570d1247e337f91e555c3be0e8c8a5aba539d9f
+mcgill,McGill Real World,Robust Semi-automatic Head Pose Labeling for Real-World Face Video Sequences,c570d1247e337f91e555c3be0e8c8a5aba539d9f
+megaage,MegaAge,Quantifying Facial Age by Posterior of Age Comparisons,d80a3d1f3a438e02a6685e66ee908446766fefa9
+megaface,MegaFace,The MegaFace Benchmark: 1 Million Faces for Recognition at Scale,96e0cfcd81cdeb8282e29ef9ec9962b125f379b0
+megaface,MegaFace,Level Playing Field for Million Scale Face Recognition,28d4e027c7e90b51b7d8908fce68128d1964668a
+mifs,MIFS,Spoofing Faces Using Makeup: An Investigative Study,23e824d1dfc33f3780dd18076284f07bd99f1c43
+mit_cbcl,MIT CBCL,Component-based Face Recognition with 3D Morphable Models,079a0a3bf5200994e1f972b1b9197bf2f90e87d4
+miw,MIW,Automatic Facial Makeup Detection with Application in Face Recognition,fcc6fe6007c322641796cb8792718641856a22a7
+mmi_facial_expression,MMI Facial Expression Dataset,WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS,2a75f34663a60ab1b04a0049ed1d14335129e908
+moments_in_time,Moments in Time,Moments in Time Dataset: one million videos for event understanding,a5a44a32a91474f00a3cda671a802e87c899fbb4
+morph,MORPH Commercial,MORPH: A Longitudinal Image Database of Normal Adult Age-Progression,9055b155cbabdce3b98e16e5ac9c0edf00f9552f
+morph_nc,MORPH Non-Commercial,MORPH: A Longitudinal Image Database of Normal Adult Age-Progression,9055b155cbabdce3b98e16e5ac9c0edf00f9552f
+mot,MOT,Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics,5981e6479c3fd4e31644db35d236bfb84ae46514
+mot,MOT,"Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking",5981e6479c3fd4e31644db35d236bfb84ae46514
+mot,MOT,Learning to associate: HybridBoosted multi-target tracker for crowded scene,5981e6479c3fd4e31644db35d236bfb84ae46514
+mpi_large,Large MPI Facial Expression,The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions,ea050801199f98a1c7c1df6769f23f658299a3ae
+mpi_small,Small MPI Facial Expression,The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions,ea050801199f98a1c7c1df6769f23f658299a3ae
+mpii_gaze,MPIIGaze,Appearance-based Gaze Estimation in the Wild,0df0d1adea39a5bef318b74faa37de7f3e00b452
+mpii_human_pose,MPII Human Pose,2D Human Pose Estimation: New Benchmark and State of the Art Analysis,3325860c0c82a93b2eac654f5324dd6a776f609e
+mr2,MR2,The MR2: A multi-racial mega-resolution database of facial stimuli,578d4ad74818086bb64f182f72e2c8bd31e3d426
+mrp_drone,MRP Drone,Investigating Open-World Person Re-identification Using a Drone,ad01687649d95cd5b56d7399a9603c4b8e2217d7
+msceleb,MsCeleb,MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition,291265db88023e92bb8c8e6390438e5da148e8f5
+msmt_17,MSMT17,Person Transfer GAN to Bridge Domain Gap for Person Re-Identification,ec792ad2433b6579f2566c932ee414111e194537
+mtfl,MTFL,Facial Landmark Detection by Deep Multi-task Learning,a0fd85b3400c7b3e11122f44dc5870ae2de9009a
+mtfl,MTFL,Learning Deep Representation for Face Alignment with Auxiliary Attributes,a0fd85b3400c7b3e11122f44dc5870ae2de9009a
+muct,MUCT,The MUCT Landmarked Face Database,a74251efa970b92925b89eeef50a5e37d9281ad0
+mug_faces,MUG Faces,The MUG Facial Expression Database,f1af714b92372c8e606485a3982eab2f16772ad8
+multi_pie,MULTIPIE,Multi-PIE,109df0e8e5969ddf01e073143e83599228a1163f
+names_and_faces_news,News Dataset,Names and Faces,2fda164863a06a92d3a910b96eef927269aeb730
+nd_2006,ND-2006,Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition,fd8168f1c50de85bac58a8d328df0a50248b16ae
+nova_emotions,Novaemötions Dataset,Crowdsourcing facial expressions for affective-interaction,7f4040b482d16354d5938c1d1b926b544652bf5b
+nova_emotions,Novaemötions Dataset,Competitive affective gamming: Winning with a smile,7f4040b482d16354d5938c1d1b926b544652bf5b
+nudedetection,Nude Detection,A Bag-of-Features Approach based on Hue-SIFT Descriptor for Nude Detection,7ace44190729927e5cb0dd5d363fcae966fe13f7
+orl,ORL,Parameterisation of a Stochastic Model for Human Face Identification,55206f0b5f57ce17358999145506cd01e570358c
+penn_fudan,Penn Fudan,Object Detection Combining Recognition and Segmentation,3394168ff0719b03ff65bcea35336a76b21fe5e4
+peta,PETA,Pedestrian Attribute Recognition At Far Distance,2a4bbee0b4cf52d5aadbbc662164f7efba89566c
+pets,PETS 2017,PETS 2017: Dataset and Challenge,22909dd19a0ec3b6065334cb5be5392cb24d839d
+pilot_parliament,PPB,Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification,fb82681ac5d3487bd8e52dbb3d1fa220eeac855e
+pipa,PIPA,Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues,0a85bdff552615643dd74646ac881862a7c7072d
+pku,PKU,Swiss-System Based Cascade Ranking for Gait-based Person Re-identification,f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f
+pku_reid,PKU-Reid,Orientation driven bag of appearances for person re-identification,a7fe834a0af614ce6b50dc093132b031dd9a856b
+pornodb,Pornography DB,Pooling in Image Representation: the Visual Codeword Point of View,b92a1ed9622b8268ae3ac9090e25789fc41cc9b8
+precarious,Precarious,Expecting the Unexpected: Training Detectors for Unusual Pedestrians With Adversarial Imposters,9e5378e7b336c89735d3bb15cf67eff96f86d39a
+prid,PRID,Person Re-Identification by Descriptive and Discriminative Classification,16c7c31a7553d99f1837fc6e88e77b5ccbb346b8
+prw,PRW,Person Re-identification in the Wild,0b84f07af44f964817675ad961def8a51406dd2e
+psu,PSU,Vision-based Analysis of Small Groups in Pedestrian Crowds,066000d44d6691d27202896691f08b27117918b9
+pubfig,PubFig,Attribute and Simile Classifiers for Face Verification,759a3b3821d9f0e08e0b0a62c8b693230afc3f8d
+pubfig_83,pubfig83,Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook,9c23859ec7313f2e756a3e85575735e0c52249f4
+put_face,Put Face,The PUT face database,370b5757a5379b15e30d619e4d3fb9e8e13f3256
+qmul_grid,GRID,Multi-Camera Activity Correlation Analysis,2edb87494278ad11641b6cf7a3f8996de12b8e14
+qmul_grid,GRID,Time-delayed correlation analysis for multi-camera activity understanding,2edb87494278ad11641b6cf7a3f8996de12b8e14
+qmul_surv_face,QMUL-SurvFace,Surveillance Face Recognition Challenge,c866a2afc871910e3282fd9498dce4ab20f6a332
+rafd,RaFD,Presentation and validation of the Radboud Faces Database,3765df816dc5a061bc261e190acc8bdd9d47bec0
+raid,RAiD,Consistent Re-identification in a Camera Network,09d78009687bec46e70efcf39d4612822e61cb8c
+rap_pedestrian,RAP,A Richly Annotated Dataset for Pedestrian Attribute Recognition,221c18238b829c12b911706947ab38fd017acef7
+reseed,ReSEED,ReSEED: Social Event dEtection Dataset,54983972aafc8e149259d913524581357b0f91c3
+saivt,SAIVT SoftBio,A Database for Person Re-Identification in Multi-Camera Surveillance Networks,22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b
+sarc3d,Sarc3D,SARC3D: a new 3D body model for People Tracking and Re-identification,e27ef52c641c2b5100a1b34fd0b819e84a31b4df
+scface,SCface,SCface – surveillance cameras face database,f3b84a03985de3890b400b68e2a92c0a00afd9d0
+scut_fbp,SCUT-FBP,SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception,bd26dabab576adb6af30484183c9c9c8379bf2e0
+scut_head,SCUT HEAD,Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture,dfdcd8c7c91813ba1624c9a21d2d01ef06a49afd
+sdu_vid,SDU-VID,A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification,98bb029afe2a1239c3fdab517323066f0957b81b
+sdu_vid,SDU-VID,Local descriptors encoded by Fisher vectors for person re-identification,98bb029afe2a1239c3fdab517323066f0957b81b
+sdu_vid,SDU-VID,Person reidentification by video ranking,98bb029afe2a1239c3fdab517323066f0957b81b
+sheffield,Sheffield Face,Face Recognition: From Theory to Applications,3607afdb204de9a5a9300ae98aa4635d9effcda2
+social_relation,Social Relation,From Facial Expression Recognition to Interpersonal Relation Prediction,2a171f8d14b6b8735001a11c217af9587d095848
+social_relation,Social Relation,Learning Social Relation Traits from Face Images,2a171f8d14b6b8735001a11c217af9587d095848
+soton,SOTON HiD,On a Large Sequence-Based Human Gait Database,4f93cd09785c6e77bf4bc5a788e079df524c8d21
+sports_videos_in_the_wild,SVW,Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis,1a40092b493c6b8840257ab7f96051d1a4dbfeb2
+stair_actions,STAIR Action,STAIR Actions: A Video Dataset of Everyday Home Actions,d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9
+stanford_drone,Stanford Drone,Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes,c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709
+stickmen_buffy,Buffy Stickmen,Learning to Parse Images of Articulated Objects,4b1d23d17476fcf78f4cbadf69fb130b1aa627c0
+stickmen_buffy,Buffy Stickmen,Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation,4b1d23d17476fcf78f4cbadf69fb130b1aa627c0
+stickmen_family,We Are Family Stickmen,We Are Family: Joint Pose Estimation of Multiple Persons,0dc11a37cadda92886c56a6fb5191ded62099c28
+stickmen_pascal,Stickmen PASCAL,Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation,6dd0597f8513dc100cd0bc1b493768cde45098a9
+stickmen_pascal,Stickmen PASCAL,Learning to Parse Images of Articulated Objects,6dd0597f8513dc100cd0bc1b493768cde45098a9
+sun_attributes,SUN,The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding,833fa04463d90aab4a9fe2870d480f0b40df446e
+sun_attributes,SUN,"SUN Attribute Database:
+Discovering, Annotating, and Recognizing Scene Attributes",833fa04463d90aab4a9fe2870d480f0b40df446e
+svs,SVS,Pedestrian Attribute Classification in Surveillance: Database and Evaluation,488e475eeb3bb39a145f23ede197cd3620f1d98a
+texas_3dfrd,Texas 3DFRD,Texas 3D Face Recognition Database,2ce2560cf59db59ce313bbeb004e8ce55c5ce928
+texas_3dfrd,Texas 3DFRD,Anthropometric 3D Face Recognition,2ce2560cf59db59ce313bbeb004e8ce55c5ce928
+tiny_faces,TinyFace,Low-Resolution Face Recognition,8990cdce3f917dad622e43e033db686b354d057c
+tiny_images,Tiny Images,80 million tiny images: a large dataset for non-parametric object and scene recognition,31b58ced31f22eab10bd3ee2d9174e7c14c27c01
+towncenter,TownCenter,Stable Multi-Target Tracking in Real-Time Surveillance Video,9361b784e73e9238d5cefbea5ac40d35d1e3103f
+tud_brussels,TUD-Brussels,Multi-Cue Onboard Pedestrian Detection,6ad5a38df8dd4cdddd74f31996ce096d41219f72
+tud_campus,TUD-Campus,People-Tracking-by-Detection and People-Detection-by-Tracking,3316521a5527c7700af8ae6aef32a79a8b83672c
+tud_crossing,TUD-Crossing,People-Tracking-by-Detection and People-Detection-by-Tracking,3316521a5527c7700af8ae6aef32a79a8b83672c
+tud_motionpairs,TUD-Motionpairs,Multi-Cue Onboard Pedestrian Detection,6ad5a38df8dd4cdddd74f31996ce096d41219f72
+tud_multiview,TUD-Multiview,Monocular 3D Pose Estimation and Tracking by Detection,436f798d1a4e54e5947c1e7d7375c31b2bdb4064
+tud_pedestrian,TUD-Pedestrian,People-Tracking-by-Detection and People-Detection-by-Tracking,3316521a5527c7700af8ae6aef32a79a8b83672c
+tud_stadtmitte,TUD-Stadtmitte,Monocular 3D Pose Estimation and Tracking by Detection,436f798d1a4e54e5947c1e7d7375c31b2bdb4064
+tvhi,TVHI,High Five: Recognising human interactions in TV shows,3cd40bfa1ff193a96bde0207e5140a399476466c
+uccs,UCCS,Large scale unconstrained open set face database,07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1
+ucf_101,UCF101,UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild,b5f2846a506fc417e7da43f6a7679146d99c5e96
+ucf_crowd,UCF-CC-50,Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images,32c801cb7fbeb742edfd94cccfca4934baec71da
+ucf_selfie,UCF Selfie,How to Take a Good Selfie?,041d3eedf5e45ce5c5229f0181c5c576ed1fafd6
+ufdd,UFDD,Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results,377f2b65e6a9300448bdccf678cde59449ecd337
+umb,UMB,UMB-DB: A Database of Partially Occluded 3D Faces,16e8b0a1e8451d5f697b94c0c2b32a00abee1d52
+umd_faces,UMD,UMDFaces: An Annotated Face Dataset for Training Deep Networks,31b05f65405534a696a847dd19c621b7b8588263
+umd_faces,UMD,The Do's and Don'ts for CNN-based Face Verification,71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6
+unbc_shoulder_pain,UNBC-McMaster Pain,PAINFUL DATA: The UNBC-McMaster Shoulder Pain Expression Archive Database,56ffa7d906b08d02d6d5a12c7377a57e24ef3391
+urban_tribes,Urban Tribes,From Bikers to Surfers: Visual Recognition of Urban Tribes,774cbb45968607a027ae4729077734db000a1ec5
+used,USED Social Event Dataset,USED: A Large-scale Social Event Detection Dataset,8627f019882b024aef92e4eb9355c499c733e5b7
+v47,V47,Re-identification of Pedestrians with Variable Occlusion and Scale,922e0a51a3b8c67c4c6ac09a577ff674cbd28b34
+vadana,VADANA,VADANA: A dense dataset for facial image analysis,4563b46d42079242f06567b3f2e2f7a80cb3befe
+vgg_celebs_in_places,CIP,Faces in Places: Compound Query Retrieval,7ebb153704706e457ab57b432793d2b6e5d12592
+vgg_faces,VGG Face,Deep Face Recognition,162ea969d1929ed180cc6de9f0bf116993ff6e06
+vgg_faces2,VGG Face2,VGGFace2: A dataset for recognising faces across pose and age,eb027969f9310e0ae941e2adee2d42cdf07d938c
+violent_flows,Violent Flows,Violent Flows: Real-Time Detection of Violent Crowd Behavior,5194cbd51f9769ab25260446b4fa17204752e799
+viper,VIPeR,"Evaluating Appearance Models for Recognition, Reacquisition, and Tracking",6273b3491e94ea4dd1ce42b791d77bdc96ee73a8
+visual_phrases,Phrasal Recognition,Recognition using Visual Phrases,e8de844fefd54541b71c9823416daa238be65546
+vmu,VMU,Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?,37d6f0eb074d207b53885bd2eb78ccc8a04be597
+voc,VOC,The PASCAL Visual Object Classes (VOC) Challenge,abe9f3b91fd26fa1b50cd685c0d20debfb372f73
+vqa,VQA,VQA: Visual Question Answering,01959ef569f74c286956024866c1d107099199f7
+ward,WARD,Re-identify people in wide area camera network,6f3c76b7c0bd8e1d122c6ea808a271fd4749c951
+who_goes_there,WGT,Who Goes There? Approaches to Mapping Facial Appearance Diversity,9b9bf5e623cb8af7407d2d2d857bc3f1b531c182
+wider,WIDER,Recognize Complex Events from Static Images by Fusing Deep Channels,356b431d4f7a2a0a38cf971c84568207dcdbf189
+wider_attribute,WIDER Attribute,Human Attribute Recognition by Deep Hierarchical Contexts,44d23df380af207f5ac5b41459c722c87283e1eb
+wider_face,WIDER FACE,WIDER FACE: A Face Detection Benchmark,52d7eb0fbc3522434c13cc247549f74bb9609c5d
+wildtrack,WildTrack,WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection,77c81c13a110a341c140995bedb98101b9e84f7f
+wlfdb,,WLFDB: Weakly Labeled Face Databases,5ad4e9f947c1653c247d418f05dad758a3f9277b
+yale_faces,YaleFaces,Acquiring Linear Subspaces for Face Recognition under Variable Lighting,18c72175ddbb7d5956d180b65a96005c100f6014
+yale_faces,YaleFaces,From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose,18c72175ddbb7d5956d180b65a96005c100f6014
+yawdd,YawDD,YawDD: A Yawning Detection Dataset,a94cae786d515d3450d48267e12ca954aab791c4
+yfcc_100m,YFCC100M,YFCC100M: The New Data in Multimedia Research,a6e695ddd07aad719001c0fc1129328452385949
+york_3d,UOY 3D Face Database,Three-Dimensional Face Recognition: An Eigensurface Approach,19d1b811df60f86cbd5e04a094b07f32fff7a32a
+youtube_faces,YouTubeFaces,Face Recognition in Unconstrained Videos with Matched Background Similarity,560e0e58d0059259ddf86fcec1fa7975dee6a868
+youtube_makeup,YMU,Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?,fcc6fe6007c322641796cb8792718641856a22a7
+youtube_makeup,YMU,Automatic Facial Makeup Detection with Application in Face Recognition,fcc6fe6007c322641796cb8792718641856a22a7
+youtube_poses,YouTube Pose,Personalizing Human Video Pose Estimation,1c2802c2199b6d15ecefe7ba0c39bfe44363de38
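The file above is keyed by dataset; a key repeats when a dataset has several citation papers (feret has four), and some quoted titles span multiple physical lines. A minimal loader sketch (hypothetical helper, not part of the repo):

```
# Sketch: group citation_lookup.csv rows by dataset key. csv.DictReader
# handles the quoted multi-line titles (e.g. the ilids_mcts and lfw_a rows).
import csv
from collections import defaultdict

citations = defaultdict(list)
with open("scraper/datasets/citation_lookup.csv", newline="") as f:
    for row in csv.DictReader(f):
        citations[row["key"]].append((row["title"], row["paper_id"]))

print(len(citations["feret"]))  # -> 4 citation papers for FERET
```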
diff --git a/scraper/pdf_dump_all.sh b/scraper/pdf_dump_all.sh
new file mode 100644
index 00000000..a17c8d44
--- /dev/null
+++ b/scraper/pdf_dump_all.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+for i in datasets/s2/pdf/*/*/*.pdf
+ do
+ OUTPUT="${i%.*}.txt"
+ OUTPUT="${OUTPUT/pdf/txt}"
+  IMDIR=$(dirname "${OUTPUT}")
+  if [[ ! -e "$OUTPUT" ]]
+  then
+    pdf2txt.py -o "${OUTPUT}" -O "${IMDIR}" "${i}"
+    if [ -s "$OUTPUT" ]
+    then
+      echo "found $OUTPUT"
+    else
+      echo "rm empty $OUTPUT"
+      rm -f "$OUTPUT"
+ fi
+ fi
+ done
+
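For reference, the same walk expressed in Python (assuming pdf2txt.py is pdfminer's CLI, which matches the -o/-O flags used above); a sketch, not a drop-in replacement:

```
# Sketch: mirror datasets/s2/pdf/**.pdf to datasets/s2/txt/**.txt and drop
# empty dumps so failed extractions are retried on the next run.
import subprocess
from pathlib import Path

for pdf in Path("datasets/s2/pdf").glob("*/*/*.pdf"):
    txt = Path(str(pdf.with_suffix(".txt")).replace("/pdf/", "/txt/", 1))
    if txt.exists():
        continue
    txt.parent.mkdir(parents=True, exist_ok=True)
    subprocess.run(["pdf2txt.py", "-o", str(txt), "-O", str(txt.parent),
                    str(pdf)], check=False)
    if txt.exists() and txt.stat().st_size == 0:
        txt.unlink()  # rm empty output, like the shell version
```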
diff --git a/scraper/pdf_dump_first_page.sh b/scraper/pdf_dump_first_page.sh
index 6277d40c..20e5182d 100644
--- a/scraper/pdf_dump_first_page.sh
+++ b/scraper/pdf_dump_first_page.sh
@@ -13,13 +13,5 @@ for i in datasets/s2/pdf/*/*/*.pdf
echo "rm empty $OUTPUT"
rm -f $OUTPUT
fi
- else
- if [ -s $OUTPUT ]
- then
- echo "found $OUTPUT"
- else
- echo "rm empty $OUTPUT"
- rm -f $OUTPUT
- fi
fi
done
diff --git a/scraper/reports/paper_title_report.html b/scraper/reports/paper_title_report.html
new file mode 100644
index 00000000..68c002a9
--- /dev/null
+++ b/scraper/reports/paper_title_report.html
@@ -0,0 +1,9 @@
+<!doctype html><html><head><meta charset='utf-8'><title>Paper Title Sanity Check</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Title Sanity Check</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><a href="http://doi.acm.org/10.1145/2072572.2072590">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance">[s2]</a></td><td></td><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td></tr><tr><td>50_people_one_question</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose Estimates Across Space and Time</td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>a_pascal_yahoo</td><td>aPascal</td><td>Describing Objects by their Attributes</td><td>Describing objects by their attributes</td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing objects by their 
attributes&sort=relevance">[s2]</a></td><td></td><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance">[s2]</a></td><td>Australian National University</td><td>b1f4423c227fa37b9680787be38857069247a307</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=collecting large, richly annotated facial-expression databases from movies&sort=relevance">[s2]</a></td><td>Australian National University</td><td>b1f4423c227fa37b9680787be38857069247a307</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A New Database for Facial Expression, Valence, and Arousal Computation in the Wild</td><td>Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</td><td><a href="http://dl.acm.org/citation.cfm?id=3232665">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a new database for facial expression, valence, and arousal computation in the wild&sort=relevance">[s2]</a></td><td></td><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance">[s2]</a></td><td>University of California, Irvine</td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance">[s2]</a></td><td></td><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr><tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective 
Communication</td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr><tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td><a href="https://doi.org/10.1007/s11263-013-0672-6">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr><tr><td>berkeley_pose</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute classification</td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr><tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr><tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr><tr><td>bjut_3d</td><td>BJUT-3D</td><td>The BJUT-3D Large-Scale Chinese Face Database</td><td>A novel face recognition method based on 3D face model</td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the bjut-3d large-scale chinese face database&sort=relevance">[s2]</a></td><td></td><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td></tr><tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face analysis&sort=relevance">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr><tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human 
behavior analysis&sort=relevance">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>Brainwash dataset</td><td>Brainwash: A Data System for Feature Engineering</td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=brainwash dataset&sort=relevance">[s2]</a></td><td></td><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td></tr><tr><td>bu_3dfe</td><td>BU-3DFE</td><td>A 3D Facial Expression Database For Facial Behavior Research</td><td>A 3D facial expression database for facial behavior research</td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d facial expression database for facial behavior research&sort=relevance">[s2]</a></td><td>SUNY Binghamton</td><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td></tr><tr><td>buhmap_db</td><td>BUHMAP-DB </td><td>Facial Feature Tracking and Expression Recognition for Sign Language</td><td>Facial feature tracking and expression recognition for sign language</td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial feature tracking and expression recognition for sign language&sort=relevance">[s2]</a></td><td></td><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td></tr><tr><td>cafe</td><td>CAFE</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: validity and reliability from untrained adults&sort=relevance">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr><tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for learning of object categories</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a 
benchmark&sort=relevance">[s2]</a></td><td></td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: an evaluation of the state of the art&sort=relevance">[s2]</a></td><td></td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>camel</td><td>CAMEL</td><td>CAMEL Dataset for Visual and Thermal Infrared Multiple Object Detection and Tracking</td><td>Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=camel dataset for visual and thermal infrared multiple object detection and tracking&sort=relevance">[s2]</a></td><td></td><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td></tr><tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>casia_webface</td><td>CASIA Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr><tr><td>celeba</td><td>CelebA</td><td>Deep Learning Face Attributes in the Wild</td><td>Deep Learning Face Attributes in the Wild</td><td><a href="http://arxiv.org/pdf/1411.7766v2.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face attributes in the wild&sort=relevance">[s2]</a></td><td>Chinese University of Hong Kong</td><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td></tr><tr><td>celeba_plus</td><td>CelebFaces+</td><td>Deep Learning Face Representation from Predicting 10,000 Classes</td><td>Learning Deep Representation for Imbalanced Classification</td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face representation from predicting 10,000 classes&sort=relevance">[s2]</a></td><td>Shenzhen Institutes of Advanced Technology</td><td>69a68f9cf874c69e2232f47808016c2736b90c35</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces
and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: A Review of Events and Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><a href="https://arxiv.org/pdf/1701.02664.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr><tr><td>chokepoint</td><td>ChokePoint</td><td>Patch-based Probabilistic Image Quality Assessment for Face Selection and Improved Video-based Face Recognition</td><td>Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</td><td><a href="http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=patch-based probabilistic image quality assessment for face selection and improved video-based face recognition&sort=relevance">[s2]</a></td><td>University of Queensland</td><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset for semantic urban scene understanding&sort=relevance">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset&sort=relevance">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image segmentation and labeling&sort=relevance">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr><tr><td>cmdp</td><td>CMDP</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td>Distance Estimation of an Unknown Person from a Portrait</td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=distance estimation of an unknown person from a portrait&sort=relevance">[s2]</a></td><td>California Institute of Technology</td><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a 
href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO Captions: Data Collection and Evaluation Server</td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance">[s2]</a></td><td></td><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td></tr><tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in images&sort=relevance">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr><tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question answering&sort=relevance">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr><tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr><tr><td>cohn_kanade</td><td>CK</td><td>Comprehensive Database for Facial Expression Analysis</td><td>Comprehensive Database for Facial Expression Analysis</td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=comprehensive database for facial expression analysis&sort=relevance">[s2]</a></td><td>Carnegie Mellon University</td><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td></tr><tr><td>cohn_kanade_plus</td><td>CK+</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the extended cohn-kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression&sort=relevance">[s2]</a></td><td></td><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>A 3D Morphable Eye Region Model for Gaze Estimation</td><td><a href="https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object interaction&sort=relevance">[s2]</a></td><td>Carnegie Mellon University</td><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td></tr><tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr><tr><td>cuhk01</td><td>CUHK01</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric learning&sort=relevance">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr><tr><td>cuhk02</td><td>CUHK02</td><td>Locally Aligned Feature Transforms across Views</td><td>Locally Aligned Feature Transforms across Views</td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr><tr><td>cuhk03</td><td>CUHK03</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>czech_news_agency</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit 
database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance">[s2]</a></td><td>Jacobs University</td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>data_61</td><td>Data61 Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>Fashion Landmark Detection in the Wild</td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance">[s2]</a></td><td>Chinese University of Hong Kong</td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>Fashion Landmark Detection in the Wild</td><td>Fashion Landmark Detection in the Wild</td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fashion landmark detection in the wild&sort=relevance">[s2]</a></td><td>Chinese University of Hong Kong</td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance">[s2]</a></td><td></td><td>a5acda0e8c0937bfed013e6382da127103e41395</td></tr><tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: 
cross-distance and cross-spectral matching&sort=relevance">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr><tr><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td>Depth and Appearance for Mobile Scene Analysis</td><td>Depth and Appearance for Mobile Scene Analysis</td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=depth and appearance for mobile scene analysis&sort=relevance">[s2]</a></td><td></td><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td></tr><tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance">[s2]</a></td><td></td><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td></tr><tr><td>expw</td><td>ExpW</td><td>Learning Social Relation Traits from Face Images</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>face_research_lab</td><td>Face Research Lab London</td><td>Face Research Lab London Set. 
figshare</td><td>Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face research lab london set. figshare&sort=relevance">[s2]</a></td><td>University College London</td><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td></tr><tr><td>face_scrub</td><td>FaceScrub</td><td>A data-driven approach to cleaning large face datasets</td><td>A data-driven approach to cleaning large face datasets</td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a data-driven approach to cleaning large face datasets&sort=relevance">[s2]</a></td><td>University of Illinois, Urbana-Champaign</td><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>Face swapping: automatically replacing faces in photographs</td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large collections of images with faces&sort=relevance">[s2]</a></td><td>Columbia University</td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>Face Swapping: Automatically Replacing Faces in Photographs</td><td>Face swapping: automatically replacing faces in photographs</td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face swapping: automatically replacing faces in photographs&sort=relevance">[s2]</a></td><td>Columbia University</td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>facebook_100</td><td>Facebook100</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer vision: a case study in unconstrained face recognition on facebook&sort=relevance">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr><tr><td>families_in_the_wild</td><td>FIW</td><td>Visual Kinship Recognition of Families in the Wild</td><td>Visual Kinship Recognition of Families in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=visual kinship recognition of families in the wild&sort=relevance">[s2]</a></td><td></td><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>A Benchmark for Face Detection in Unconstrained 
Settings</td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance">[s2]</a></td><td>University of Massachusetts</td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>Generalização cartográfica automatizada para um banco de dados cadastral</td><td><a href="https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance">[s2]</a></td><td></td><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr><tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr><tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep networks for facial expression recognition with crowd-sourced label distribution</td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression recognition with crowd-sourced label distribution&sort=relevance">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr><tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a
href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>A semi-automatic methodology for facial landmark annotation</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a semi-automatic methodology for facial landmark annotation&sort=relevance">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>2D and 3D face recognition: A survey</td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance">[s2]</a></td><td></td><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing Cosegmentation for Shopping Images With Cluttered Background</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance">[s2]</a></td><td></td><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td></tr><tr><td>gavab_db</td><td>Gavab</td><td>GavabDB: a 3D face database</td><td>Expression invariant 3D face recognition with a Morphable Model</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gavabdb: a 3d face 
database&sort=relevance">[s2]</a></td><td></td><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td></tr><tr><td>geofaces</td><td>GeoFaces</td><td>GeoFaceExplorer: Exploring the Geo-Dependence of Facial Attributes</td><td>GeoFaceExplorer: exploring the geo-dependence of facial attributes</td><td><a href="http://doi.acm.org/10.1145/2676440.2676443">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=geofaceexplorer: exploring the geo-dependence of facial attributes&sort=relevance">[s2]</a></td><td>University of Kentucky</td><td>17b46e2dad927836c689d6787ddb3387c6159ece</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Nefian and Monson H. Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Object recognition using segmentation for feature detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Object recognition using segmentation for feature detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Object Recognition Using Segmentation for Feature Detection</td><td>Object recognition using segmentation for feature detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=object recognition using segmentation for feature detection&sort=relevance">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D human pose annotations</td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification
systems&sort=relevance">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>helen</td><td>Helen</td><td>Interactive Facial Feature Localization</td><td>Interactive Facial Feature Localization</td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=interactive facial feature localization&sort=relevance">[s2]</a></td><td>University of Illinois, Urbana-Champaign</td><td>95f12d27c3b4914e0668a268360948bce92f7db3</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>High-resolution comprehensive 3-D dynamic database for facial articulation analysis</td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance">[s2]</a></td><td></td><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td></tr><tr><td>hipsterwars</td><td>Hipsterwars</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td>Hipster Wars: Discovering Elements of Fashion Styles</td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hipster wars: discovering elements of fashion styles&sort=relevance">[s2]</a></td><td>Tohoku University</td><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td></tr><tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? 
a preliminary study of face-based recognition on a transgender dataset&sort=relevance">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Face recognition across gender transformation using SVM Classifier</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition across gender transformation using svm classifier&sort=relevance">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>iit_dehli_ear</td><td>IIT Delhi Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear Imaging</td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr><tr><td>ijb_a</td><td>IJB-A</td><td>Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the
frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr><tr><td>ijb_b</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td></td><td>Imagery Library for Intelligent Detection Systems:
+The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems:
+the i-lids user guide&sort=relevance">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identification by Video Ranking</td><td>Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</td><td><a href="https://doi.org/10.1109/LSP.2016.2574323">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by video ranking&sort=relevance">[s2]</a></td><td></td><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>Deep expectation of real and apparent age from a single image without facial landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="https://doi.org/10.1007/s11263-016-0940-3">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr><tr><td>imfdb</td><td>IMFDB</td><td>Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations</td><td>Indian Movie Face Database: A benchmark for face recognition under wide variations</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=indian movie face database: a benchmark for face recognition under wide variations&sort=relevance">[s2]</a></td><td></td><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td></tr><tr><td>imm_face</td><td>IMM Face Dataset</td><td>The IMM Face Database - An Annotated Dataset of 240 Face Images</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the imm face database - an annotated dataset of 240 face images&sort=relevance">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-task recurrent neural network for immediacy prediction&sort=relevance">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr><tr><td>imsitu</td><td>imSitu</td><td>Situation Recognition: Visual Semantic Role Labeling for
Image Understanding</td><td>Situation Recognition: Visual Semantic Role Labeling for Image Understanding</td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=situation recognition: visual semantic role labeling for image understanding&sort=relevance">[s2]</a></td><td>University of Washington</td><td>51eba481dac6b229a7490f650dff7b17ce05df73</td></tr><tr><td>inria_person</td><td>INRIA Pedestrian</td><td>Histograms of Oriented Gradients for Human Detection</td><td>Histograms of oriented gradients for human detection</td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=histograms of oriented gradients for human detection&sort=relevance">[s2]</a></td><td></td><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td></tr><tr><td>jaffe</td><td>JAFFE</td><td>Coding Facial Expressions with Gabor Wavelets</td><td>Coding Facial Expressions with Gabor Wavelets</td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=coding facial expressions with gabor wavelets&sort=relevance">[s2]</a></td><td>Kyushu University</td><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>A Synchronization Ground Truth for the Jiku Mobile Video Dataset</td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance">[s2]</a></td><td></td><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td></tr><tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr><tr><td>kdef</td><td>KDEF</td><td>The Karolinska Directed Emotional Faces – KDEF</td><td>Gaze fixation and the neural circuitry of face processing in autism</td><td><a href="http://doi.org/10.1038/nn1421">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the karolinska directed emotional faces – kdef&sort=relevance">[s2]</a></td><td></td><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kinectface</td><td>KinectFaceDB</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinectfacedb: a kinect database for face recognition&sort=relevance">[s2]</a></td><td></td><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>The Role of Machine Vision for Intelligent Vehicles</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance">[s2]</a></td><td></td><td>35ba4ebfd017a56b51e967105af9ae273c9b0178</td></tr><tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr><tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person search&sort=relevance">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr><tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td>Learning Effective Human Pose Estimation from Inaccurate Annotation</td><td>Learning effective human pose estimation from inaccurate annotation</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning effective human pose estimation from inaccurate annotation&sort=relevance">[s2]</a></td><td></td><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A 
Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance">[s2]</a></td><td>University of Massachusetts</td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>lfw_a</td><td>LFW-a</td><td>Effective Unconstrained Face Recognition by
+ Combining Multiple Descriptors and Learned
+ Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by
+ combining multiple descriptors and learned
+ background statistics&sort=relevance">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>lfw_p</td><td>LFWP</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td>Localizing Parts of Faces Using a Consensus of Exemplars</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=localizing parts of faces using a consensus of exemplars&sort=relevance">[s2]</a></td><td>Columbia University</td><td>140438a77a771a8fb656b39a78ff488066eb6b50</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance">[s2]</a></td><td>Chinese Academy of Sciences</td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A 
Benchmark</td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr><tr><td>market1203</td><td>Market 1203</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><a href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Robust Semi-automatic Head Pose Labeling for Real-World Face Video Sequences</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><a href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust semi-automatic head pose labeling for real-world face video sequences&sort=relevance">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>megaage</td><td>MegaAge</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td>Quantifying Facial Age by Posterior of Age Comparisons</td><td><a href="https://arxiv.org/pdf/1708.09687.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=quantifying facial age by posterior of age comparisons&sort=relevance">[s2]</a></td><td>Chinese University of Hong Kong</td><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td></tr><tr><td>megaface</td><td>MegaFace</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td>The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the megaface benchmark: 1 million faces for recognition at scale&sort=relevance">[s2]</a></td><td>University of Washington</td><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td></tr><tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for Million Scale Face Recognition</td><td>Level 
Playing Field for Million Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1705.00393.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr><tr><td>mifs</td><td>MIFS</td><td>Spoofing Faces Using Makeup: An Investigative Study</td><td>Spoofing faces using makeup: An investigative study</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=spoofing faces using makeup: an investigative study&sort=relevance">[s2]</a></td><td>INRIA Méditerranée</td><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td></tr><tr><td>mit_cbcl</td><td>MIT CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><a href="http://www.bheisele.com/avbpa2003.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr><tr><td>miw</td><td>MIW</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr><tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time dataset: one million videos for event understanding&sort=relevance">[s2]</a></td><td></td><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td></tr><tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a 
longitudinal image database of normal adult age-progression&sort=relevance">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=learning to associate: hybridboosted multi-target tracker for crowded scene&sort=relevance">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpii_gaze</td><td>MPIIGaze</td><td>Appearance-based Gaze Estimation in the Wild</td><td>Appearance-based gaze estimation in the wild</td><td><a href="https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=appearance-based gaze estimation in the wild&sort=relevance">[s2]</a></td><td>Max Planck Institute for Informatics</td><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td></tr><tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><a
href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of the art analysis&sort=relevance">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world person re-identification using a drone&sort=relevance">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr><tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark for large-scale face recognition&sort=relevance">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td><a href="https://arxiv.org/pdf/1711.08565.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance">[s2]</a></td><td></td><td>ec792ad2433b6579f2566c932ee414111e194537</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>muct</td><td>MUCT</td><td>The MUCT Landmarked Face Database</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the muct landmarked face 
database&sort=relevance">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>mug_faces</td><td>MUG Faces</td><td>The MUG Facial Expression Database</td><td>The MUG facial expression database</td><td><a href="http://ieeexplore.ieee.org/document/5617662/">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mug facial expression database&sort=relevance">[s2]</a></td><td>Aristotle University of Thessaloniki</td><td>f1af714b92372c8e606485a3982eab2f16772ad8</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</td><td><a href="http://dl.acm.org/citation.cfm?id=2337184">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance">[s2]</a></td><td></td><td>109df0e8e5969ddf01e073143e83599228a1163f</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nd_2006</td><td>ND-2006</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=using a multi-instance enrollment representation to improve 3d face recognition&sort=relevance">[s2]</a></td><td></td><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gaming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nudedetection</td><td>Nude Detection</td><td>A Bag-of-Features Approach based on Hue-SIFT Descriptor for Nude Detection</td><td>A bag-of-features approach based on Hue-SIFT descriptor for nude detection</td><td><a href="http://ieeexplore.ieee.org/document/7077625/">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a bag-of-features approach based on hue-sift descriptor for nude detection&sort=relevance">[s2]</a></td><td></td><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf">[pdf]</a></td><td><a
href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>penn_fudan</td><td>Penn Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining Recognition and Segmentation</td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far Distance</td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and challenge&sort=relevance">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classi cation</td><td>Summary of Research on Informant Accuracy in Network Data, 11 and on the Reverse Small World Problem</td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classi cation&sort=relevance">[s2]</a></td><td></td><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>pku</td><td>PKU</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person 
re-identification&sort=relevance">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>pornodb</td><td>Pornography DB</td><td>Pooling in Image Representation: the Visual Codeword Point of View</td><td>Pooling in image representation: The visual codeword point of view</td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pooling in image representation: the visual codeword point of view&sort=relevance">[s2]</a></td><td></td><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><a href="https://arxiv.org/pdf/1703.06283.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>prw</td><td>PRW</td><td>Person Re-identification in the Wild</td><td>Person Re-identification in the Wild</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification in the wild&sort=relevance">[s2]</a></td><td>University of Technology Sydney</td><td>0b84f07af44f964817675ad961def8a51406dd2e</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>pubfig</td><td>PubFig</td><td>Attribute and Simile Classifiers for Face Verification</td><td>Attribute and simile classifiers for face verification</td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=attribute and simile classifiers for face verification&sort=relevance">[s2]</a></td><td>Columbia University</td><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td></tr><tr><td>pubfig_83</td><td>pubfig83</td><td>Scaling Up Biologically-Inspired Computer Vision: A Case Study in Unconstrained Face Recognition on Facebook</td><td>Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scaling up biologically-inspired computer 
vision: a case study in unconstrained face recognition on facebook&sort=relevance">[s2]</a></td><td>Harvard University</td><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Time-delayed correlation analysis for multi-camera activity understanding</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=time-delayed correlation analysis for multi-camera activity understanding&sort=relevance">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance">[s2]</a></td><td></td><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent Re-identification in a Camera Network</td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><a 
href="https://pub.uni-bielefeld.de/download/2663466/2686734">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d body model for people tracking and re-identification&sort=relevance">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>Large Variability Surveillance Camera Face Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance">[s2]</a></td><td></td><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td></tr><tr><td>scut_fbp</td><td>SCUT-FBP</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td>SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</td><td><a href="https://arxiv.org/pdf/1511.02459.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scut-fbp: a benchmark dataset for facial beauty perception&sort=relevance">[s2]</a></td><td>South China University of Technology</td><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and cascaded multi-scale architecture&sort=relevance">[s2]</a></td><td></td><td>dfdcd8c7c91813ba1624c9a21d2d01ef06a49afd</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person 
re-identification&sort=relevance">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sheffield</td><td>Sheffield Face</td><td>Face Recognition: From Theory to Applications</td><td>Face Description with Local Binary Patterns: Application to Face Recognition</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition: from theory to applications&sort=relevance">[s2]</a></td><td></td><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>Learning Social Relation Traits from Face Images</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance">[s2]</a></td><td>Chinese University of Hong Kong</td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>Learning Social Relation Traits from Face Images</td><td>Learning Social Relation Traits from Face Images</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance">[s2]</a></td><td>Chinese University of Hong Kong</td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait Database</td><td>On a large sequence-based human gait database</td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>sports_videos_in_the_wild</td><td>SVW</td><td>Sports Videos in the Wild (SVW): A Video Dataset for Sports Analysis</td><td>Sports Videos in the Wild (SVW): A video dataset for sports analysis</td><td><a href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sports videos in the wild (svw): a video dataset for sports analysis&sort=relevance">[s2]</a></td><td>Michigan State University</td><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory 
Prediction In Crowded Scenes</td><td>Learning to Predict Human Behavior in Crowded Scenes</td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance">[s2]</a></td><td></td><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database:
+Discovering, Annotating, and Recognizing Scene Attributes</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sun attribute database:
+discovering, annotating, and recognizing scene attributes&sort=relevance">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>towncenter</td><td>TownCenter</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time surveillance video&sort=relevance">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and
people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_motionpairs</td><td>TUD-MotionPairs</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_multiview</td><td>TUD-Multiview</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>uccs</td><td>UCCS</td><td>Large scale unconstrained open set face database</td><td>Large scale unconstrained open set face database</td><td><a href="http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf">[pdf]</a></td><td><a
href="https://www.semanticscholar.org/search?q=large scale unconstrained open set face database&sort=relevance">[s2]</a></td><td>University of Colorado at Colorado Springs</td><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td></tr><tr><td>ucf_101</td><td>UCF101</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td>UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</td><td><a href="https://arxiv.org/pdf/1212.0402.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ucf101: a dataset of 101 human actions classes from videos in the wild&sort=relevance">[s2]</a></td><td>University of Central Florida</td><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><a href="http://doi.acm.org/10.1145/2733373.2806365">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>ufdd</td><td>UFDD</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td>Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</td><td><a href="https://arxiv.org/pdf/1804.10275.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the limits of unconstrained face detection: a challenge dataset and baseline results&sort=relevance">[s2]</a></td><td>Johns Hopkins University</td><td>377f2b65e6a9300448bdccf678cde59449ecd337</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><a href="http://arxiv.org/abs/1611.01484">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face dataset for training deep networks&sort=relevance">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td>PAINFUL DATA: The UNBC-McMaster Shoulder Pain Expression Archive Database</td><td>Painful data: The 
UNBC-McMaster shoulder pain expression archive database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=painful data: the unbc-mcmaster shoulder pain expression archive database&sort=relevance">[s2]</a></td><td></td><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td></tr><tr><td>urban_tribes</td><td>Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td>From Bikers to Surfers: Visual Recognition of Urban Tribes</td><td><a href="http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from bikers to surfers: visual recognition of urban tribes&sort=relevance">[s2]</a></td><td>Columbia University</td><td>774cbb45968607a027ae4729077734db000a1ec5</td></tr><tr><td>used</td><td>USED Social Event Dataset</td><td>USED: A Large-scale Social Event Detection Dataset</td><td>USED: a large-scale social event detection dataset</td><td><a href="http://doi.acm.org/10.1145/2910017.2910624">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=used: a large-scale social event detection dataset&sort=relevance">[s2]</a></td><td></td><td>8627f019882b024aef92e4eb9355c499c733e5b7</td></tr><tr><td>v47</td><td>V47</td><td>Re-identification of Pedestrians with Variable Occlusion and Scale</td><td>Re-identification of pedestrians with variable occlusion and scale</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identification of pedestrians with variable occlusion and scale&sort=relevance">[s2]</a></td><td></td><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td></tr><tr><td>vadana</td><td>VADANA</td><td>VADANA: A dense dataset for facial image analysis</td><td>VADANA: A dense dataset for facial image analysis</td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vadana: a dense dataset for facial image analysis&sort=relevance">[s2]</a></td><td></td><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td></tr><tr><td>vgg_celebs_in_places</td><td>CIP</td><td>Faces in Places: Compound Query Retrieval</td><td>Faces in Places: compound query retrieval</td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=faces in places: compound query retrieval&sort=relevance">[s2]</a></td><td>University of Oxford</td><td>7ebb153704706e457ab57b432793d2b6e5d12592</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>vgg_faces2</td><td>VGG Face2</td><td>VGGFace2: A dataset for recognising faces across pose and age</td><td>VGGFace2: A Dataset for Recognising Faces across Pose and Age</td><td><a href="https://arxiv.org/pdf/1710.08092.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vggface2: a dataset for recognising faces across pose and age&sort=relevance">[s2]</a></td><td>University of 
Oxford</td><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td></tr><tr><td>violent_flows</td><td>Violent Flows</td><td>Violent Flows: Real-Time Detection of Violent Crowd Behavior</td><td>Violent flows: Real-time detection of violent crowd behavior</td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=violent flows: real-time detection of violent crowd behavior&sort=relevance">[s2]</a></td><td></td><td>5194cbd51f9769ab25260446b4fa17204752e799</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>visual_phrases</td><td>Phrasal Recognition</td><td>Recognition using Visual Phrases</td><td>Recognition using visual phrases</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition using visual phrases&sort=relevance">[s2]</a></td><td></td><td>e8de844fefd54541b71c9823416daa238be65546</td></tr><tr><td>vmu</td><td>VMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance">[s2]</a></td><td></td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes Challenge: A Retrospective</td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance">[s2]</a></td><td></td><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>ward</td><td>WARD</td><td>Re-identify people in wide area camera network</td><td>Re-identify people in wide area camera network</td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identify people in wide area camera network&sort=relevance">[s2]</a></td><td></td><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? 
approaches to mapping facial appearance diversity&sort=relevance">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wider</td><td>WIDER</td><td>Recognize Complex Events from Static Images by Fusing Deep Channels</td><td>Recognize complex events from static images by fusing deep channels</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognize complex events from static images by fusing deep channels&sort=relevance">[s2]</a></td><td>Shenzhen Institutes of Advanced Technology</td><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td></tr><tr><td>wider_attribute</td><td>WIDER Attribute</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td>Human Attribute Recognition by Deep Hierarchical Contexts</td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human attribute recognition by deep hierarchical contexts&sort=relevance">[s2]</a></td><td>Chinese University of Hong Kong</td><td>44d23df380af207f5ac5b41459c722c87283e1eb</td></tr><tr><td>wider_face</td><td>WIDER FACE</td><td>WIDER FACE: A Face Detection Benchmark</td><td>WIDER FACE: A Face Detection Benchmark</td><td><a href="https://arxiv.org/pdf/1511.06523.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wider face: a face detection benchmark&sort=relevance">[s2]</a></td><td>Chinese University of Hong Kong</td><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>wlfdb</td><td></td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB: Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning detection dataset&sort=relevance">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>The New Data and New Challenges in Multimedia Research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance">[s2]</a></td><td></td><td>a6e695ddd07aad719001c0fc1129328452385949</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>youtube_faces</td><td>YouTubeFaces</td><td>Face Recognition in Unconstrained Videos with Matched Background Similarity</td><td>Face recognition in unconstrained videos with matched background similarity</td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition in unconstrained videos with matched background similarity&sort=relevance">[s2]</a></td><td>Open University of Israel</td><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Automatic Facial Makeup Detection with Application in Face Recognition</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic facial makeup detection with application in face recognition&sort=relevance">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=personalizing human video pose 
estimation&sort=relevance">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr></table></body></html> \ No newline at end of file
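For context on the report files in this commit: they are generated HTML tables, presumably emitted by `scraper/s2-final-report.py` rather than written by hand, with the "no location" variant (added below) holding the rows for which no institutional address was recovered. The following is a minimal sketch of how such a report could be produced, under stated assumptions: the paper dicts, their keys, and the `row_html`/`write_report` helpers are illustrative guesses, not the script's actual API.

```
# Hypothetical sketch only -- not the actual s2-final-report.py code.
# Each paper is assumed to be a dict carrying the eight columns shown in
# the committed reports; rows without an address go to the "no location"
# report.

COLUMNS = ('key', 'name', 'our title', 'found title', '', '', 'address', 's2 id')

def row_html(p):
    # Gray out the [pdf] cell when no PDF link was recovered.
    pdf = ('<a href="%s">[pdf]</a>' % p['pdf_url']) if p.get('pdf_url') \
          else '<span class="gray">[pdf]</span>'
    s2 = ('<a href="https://www.semanticscholar.org/search?q=%s&sort=relevance">[s2]</a>'
          % p['our_title'].lower())
    cells = (p['key'], p['name'], p['our_title'], p['found_title'],
             pdf, s2, p.get('address', ''), p['s2_id'])
    return '<tr>' + ''.join('<td>%s</td>' % c for c in cells) + '</tr>'

def write_report(path, title, papers):
    head = ("<!doctype html><html><head><meta charset='utf-8'><title>%s</title>"
            "<link rel='stylesheet' href='reports.css'></head><body><h2>%s</h2>"
            "<table border='1' cellpadding='3' cellspacing='3'>" % (title, title))
    header = '<tr>' + ''.join('<th>%s</th>' % c for c in COLUMNS) + '</tr>'
    with open(path, 'w') as f:
        f.write(head + header + ''.join(row_html(p) for p in papers)
                + '</table></body></html>')

# Example row taken from the table data in this commit (LFW has no address):
papers = [{'key': 'lfw', 'name': 'LFW',
           'our_title': 'Labeled Faces in the Wild: A Survey',
           'found_title': 'Labeled Faces in the Wild : A Survey',
           'pdf_url': 'http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf',
           'address': '', 's2_id': '7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22'}]
write_report('paper_title_report_no_location.html', 'Papers with no location',
             [p for p in papers if not p.get('address')])
```

Keeping the header cells inside a `<tr>` and closing the gray placeholder with `</span>` keeps the generated tables valid HTML.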
diff --git a/scraper/reports/paper_title_report_no_location.html b/scraper/reports/paper_title_report_no_location.html
new file mode 100644
index 00000000..7144279e
--- /dev/null
+++ b/scraper/reports/paper_title_report_no_location.html
@@ -0,0 +1,9 @@
+<!doctype html><html><head><meta charset='utf-8'><title>Papers with no location</title><link rel='stylesheet' href='reports.css'></head><body><h2>Papers with no location</h2><table border='1' cellpadding='3' cellspacing='3'><tr><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th></tr><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3d_rma</td><td>3D-RMA</td><td>Automatic 3D Face Authentication</td><td>Automatic 3D face authentication</td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic 3d face authentication&sort=relevance">[s2]</a></td><td></td><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>3dpes</td><td>3DPeS</td><td>3DPes: 3D People Dataset for Surveillance and Forensics</td><td>3DPeS: 3D people dataset for surveillance and forensics</td><td><a href="http://doi.acm.org/10.1145/2072572.2072590">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=3dpes: 3d people dataset for surveillance and forensics&sort=relevance">[s2]</a></td><td></td><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td></tr><tr><td>4dfab</td><td>4DFAB</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td>4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</td><td><a href="https://arxiv.org/pdf/1712.01443.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=4dfab: a large scale 4d facial expression database for biometric applications&sort=relevance">[s2]</a></td><td></td><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td></tr><tr><td>50_people_one_question</td><td>50 People One Question</td><td>Merging Pose Estimates Across Space and Time</td><td>Merging Pose Estimates Across Space and Time</td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=merging pose estimates across space and time&sort=relevance">[s2]</a></td><td></td><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td></tr><tr><td>a_pascal_yahoo</td><td>aPascal</td><td>Describing Objects by their Attributes</td><td>Describing objects by their attributes</td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing objects by their
attributes&sort=relevance">[s2]</a></td><td></td><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td></tr><tr><td>adience</td><td>Adience</td><td>Age and Gender Estimation of Unfiltered Faces</td><td>Age and Gender Estimation of Unfiltered Faces</td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=age and gender estimation of unfiltered faces&sort=relevance">[s2]</a></td><td></td><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A New Database for Facial Expression, Valence, and Arousal Computation in the Wild</td><td>Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</td><td><a href="http://dl.acm.org/citation.cfm?id=3232665">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a new database for facial expression, valence, and arousal computation in the wild&sort=relevance">[s2]</a></td><td></td><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td></tr><tr><td>aflw</td><td>AFLW</td><td>Annotated Facial Landmarks in the Wild: A Large-scale, Real-world Database for Facial Landmark Localization</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization&sort=relevance">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>agedb</td><td>AgeDB</td><td>AgeDB: the first manually collected, in-the-wild age database</td><td>AgeDB: The First Manually Collected, In-the-Wild Age Database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=agedb: the first manually collected, in-the-wild age database&sort=relevance">[s2]</a></td><td></td><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a 
href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>apis</td><td>APiS1.0</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>awe_ears</td><td>AWE Ears</td><td>Ear Recognition: More Than a Survey</td><td>Ear Recognition: More Than a Survey</td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ear recognition: more than a survey&sort=relevance">[s2]</a></td><td></td><td>84fe5b4ac805af63206012d29523a1e033bc827e</td></tr><tr><td>b3d_ac</td><td>B3D(AC)</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td>A 3-D Audio-Visual Corpus of Affective Communication</td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3-d audio-visual corpus of affective communication&sort=relevance">[s2]</a></td><td></td><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td></tr><tr><td>bbc_pose</td><td>BBC Pose</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td>Automatic and Efficient Human Pose Estimation for Sign Language Videos</td><td><a href="https://doi.org/10.1007/s11263-013-0672-6">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automatic and efficient human pose estimation for sign language videos&sort=relevance">[s2]</a></td><td></td><td>213a579af9e4f57f071b884aa872651372b661fd</td></tr><tr><td>berkeley_pose</td><td>BPAD</td><td>Describing People: A Poselet-Based Approach to Attribute Classification</td><td>Describing people: A poselet-based approach to attribute classification</td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing people: a poselet-based approach to attribute classification&sort=relevance">[s2]</a></td><td></td><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td></tr><tr><td>bfm</td><td>BFM</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td>A 3D Face Model for Pose and Illumination Invariant Face Recognition</td><td><a 
href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d face model for pose and illumination invariant face recognition&sort=relevance">[s2]</a></td><td></td><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td></tr><tr><td>bio_id</td><td>BioID Face</td><td>Robust Face Detection Using the Hausdorff Distance</td><td>Robust Face Detection Using the Hausdorff Distance</td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face detection using the hausdorff distance&sort=relevance">[s2]</a></td><td></td><td>4053e3423fb70ad9140ca89351df49675197196a</td></tr><tr><td>bjut_3d</td><td>BJUT-3D</td><td>The BJUT-3D Large-Scale Chinese Face Database</td><td>A novel face recognition method based on 3D face model</td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the bjut-3d large-scale chinese face database&sort=relevance">[s2]</a></td><td></td><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td></tr><tr><td>bosphorus</td><td>The Bosphorus</td><td>Bosphorus Database for 3D Face Analysis</td><td>Bosphorus Database for 3D Face Analysis</td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=bosphorus database for 3d face analysis&sort=relevance">[s2]</a></td><td></td><td>2acf7e58f0a526b957be2099c10aab693f795973</td></tr><tr><td>bp4d_plus</td><td>BP4D+</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td>Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal spontaneous emotion corpus for human behavior analysis&sort=relevance">[s2]</a></td><td></td><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>Brainwash dataset</td><td>Brainwash: A Data System for Feature Engineering</td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=brainwash dataset&sort=relevance">[s2]</a></td><td></td><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td></tr><tr><td>buhmap_db</td><td>BUHMAP-DB </td><td>Facial Feature Tracking and Expression Recognition for Sign Language</td><td>Facial feature tracking and expression recognition for sign language</td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial feature tracking and expression recognition for sign language&sort=relevance">[s2]</a></td><td></td><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td></tr><tr><td>cafe</td><td>CAFE</td><td>The Child Affective Facial Expression (CAFE) Set: Validity and reliability from untrained adults</td><td>The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the child affective facial expression (cafe) set: 
validity and reliability from untrained adults&sort=relevance">[s2]</a></td><td></td><td>20388099cc415c772926e47bcbbe554e133343d1</td></tr><tr><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td>Pruning Training Sets for Learning of Object Categories</td><td>Pruning training sets for learning of object categories</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pruning training sets for learning of object categories&sort=relevance">[s2]</a></td><td></td><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a benchmark&sort=relevance">[s2]</a></td><td></td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: an evaluation of the state of the art&sort=relevance">[s2]</a></td><td></td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>camel</td><td>CAMEL</td><td>CAMEL Dataset for Visual and Thermal Infrared Multiple Object Detection and Tracking</td><td>Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=camel dataset for visual and thermal infrared multiple object detection and tracking&sort=relevance">[s2]</a></td><td></td><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td></tr><tr><td>cas_peal</td><td>CAS-PEAL</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cas-peal large-scale chinese face database and baseline evaluations&sort=relevance">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>casia_webface</td><td>CASIA Webface</td><td>Learning Face Representation from Scratch</td><td>Learning Face Representation from Scratch</td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning face representation from scratch&sort=relevance">[s2]</a></td><td></td><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus 
set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>chalearn</td><td>ChaLearn</td><td>ChaLearn Looking at People: A Review of Events and Resources</td><td>ChaLearn looking at people: A review of events and resources</td><td><a href="https://arxiv.org/pdf/1701.02664.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=chalearn looking at people: a review of events and resources&sort=relevance">[s2]</a></td><td></td><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset for semantic urban scene understanding&sort=relevance">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset&sort=relevance">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>clothing_co_parsing</td><td>CCP</td><td>Clothing Co-Parsing by Joint Image Segmentation and Labeling</td><td>Clothing Co-parsing by Joint Image Segmentation and Labeling</td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing co-parsing by joint image segmentation and labeling&sort=relevance">[s2]</a></td><td></td><td>2bf8541199728262f78d4dced6fb91479b39b738</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO Captions: Data Collection and Evaluation Server</td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance">[s2]</a></td><td></td><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td></tr><tr><td>coco_action</td><td>COCO-a</td><td>Describing Common Human Visual Actions in Images</td><td>Describing Common Human Visual Actions in Images</td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=describing common human visual actions in 
images&sort=relevance">[s2]</a></td><td></td><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td></tr><tr><td>coco_qa</td><td>COCO QA</td><td>Exploring Models and Data for Image Question Answering</td><td>Exploring Models and Data for Image Question Answering</td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=exploring models and data for image question answering&sort=relevance">[s2]</a></td><td></td><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td></tr><tr><td>cofw</td><td>COFW</td><td>Robust face landmark estimation under occlusion</td><td>Robust Face Landmark Estimation under Occlusion</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=robust face landmark estimation under occlusion&sort=relevance">[s2]</a></td><td></td><td>2724ba85ec4a66de18da33925e537f3902f21249</td></tr><tr><td>cohn_kanade_plus</td><td>CK+</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td>The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the extended cohn-kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression&sort=relevance">[s2]</a></td><td></td><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td></tr><tr><td>complex_activities</td><td>Ongoing Complex Activities</td><td>Recognition of Ongoing Complex Activities by Sequence Prediction over a Hierarchical Label Space</td><td>Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition of ongoing complex activities by sequence prediction over a hierarchical label space&sort=relevance">[s2]</a></td><td></td><td>65355cbb581a219bd7461d48b3afd115263ea760</td></tr><tr><td>cuhk01</td><td>CUHK01</td><td>Human Reidentification with Transferred Metric Learning</td><td>Human Reidentification with Transferred Metric Learning</td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=human reidentification with transferred metric learning&sort=relevance">[s2]</a></td><td></td><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td></tr><tr><td>cuhk02</td><td>CUHK02</td><td>Locally Aligned Feature Transforms across Views</td><td>Locally Aligned Feature Transforms across Views</td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=locally aligned feature transforms across views&sort=relevance">[s2]</a></td><td></td><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td></tr><tr><td>cuhk03</td><td>CUHK03</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td>DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=deepreid: deep filter pairing neural network for person re-identification&sort=relevance">[s2]</a></td><td></td><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>czech_news_agency</td><td>UFI</td><td>Unconstrained Facial Images: Database for Face Recognition under Real-world Conditions</td><td>Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=unconstrained facial images: database for face recognition under real-world conditions&sort=relevance">[s2]</a></td><td></td><td>4b4106614c1d553365bad75d7866bff0de6056ed</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>data_61</td><td>Data61 Pedestrian</td><td>A Multi-Modal Graphical Model for Scene Analysis</td><td>A Multi-modal Graphical Model for Scene Analysis</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-modal graphical model for scene analysis&sort=relevance">[s2]</a></td><td></td><td>563c940054e4b456661762c1ab858e6f730c3159</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance">[s2]</a></td><td></td><td>a5acda0e8c0937bfed013e6382da127103e41395</td></tr><tr><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td>Nighttime Face Recognition at Long Distance: Cross-distance and Cross-spectral Matching</td><td>Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=nighttime face recognition at long distance: cross-distance and cross-spectral matching&sort=relevance">[s2]</a></td><td></td><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td></tr><tr><td>duke_mtmc</td><td>Duke MTMC</td><td>Performance Measures 
and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance">[s2]</a></td><td></td><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td></tr><tr><td>emotio_net</td><td>EmotioNet Database</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td>EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=emotionet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild&sort=relevance">[s2]</a></td><td></td><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td></tr><tr><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td>Depth and Appearance for Mobile Scene Analysis</td><td>Depth and Appearance for Mobile Scene Analysis</td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=depth and appearance for mobile scene analysis&sort=relevance">[s2]</a></td><td></td><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td></tr><tr><td>europersons</td><td>EuroCity Persons</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td>The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</td><td><a href="https://arxiv.org/pdf/1805.07193.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the eurocity persons dataset: a novel benchmark for object detection&sort=relevance">[s2]</a></td><td></td><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td></tr><tr><td>expw</td><td>ExpW</td><td>Learning Social Relation Traits from Face Images</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>expw</td><td>ExpW</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>faceplace</td><td>Face Place</td><td>Recognizing disguised faces</td><td>Recognizing disguised faces</td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognizing disguised faces&sort=relevance">[s2]</a></td><td></td><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td></tr><tr><td>families_in_the_wild</td><td>FIW</td><td>Visual Kinship Recognition of Families in the Wild</td><td>Visual Kinship Recognition of Families in the Wild</td><td><a 
href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=visual kinship recognition of families in the wild&sort=relevance">[s2]</a></td><td></td><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>Generalização cartográfica automatizada para um banco de dados cadastral</td><td><a href="https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance">[s2]</a></td><td></td><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td>The FERET Verification Testing Protocol for Face Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret verification testing protocol for face recognition algorithms&sort=relevance">[s2]</a></td><td></td><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td>The FERET database and evaluation procedure for face-recognition algorithms</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret database and evaluation procedure for face-recognition algorithms&sort=relevance">[s2]</a></td><td></td><td>dc8b25e35a3acb812beb499844734081722319b4</td></tr><tr><td>feret</td><td>FERET</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td>FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=feret ( face recognition technology ) recognition algorithm development and test results&sort=relevance">[s2]</a></td><td></td><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td></tr><tr><td>feret</td><td>FERET</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td>The FERET Evaluation Methodology for Face-Recognition Algorithms</td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the feret evaluation methodology for face-recognition algorithms&sort=relevance">[s2]</a></td><td></td><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td></tr><tr><td>ferplus</td><td>FER+</td><td>Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution</td><td>Training deep networks for facial expression recognition with crowd-sourced label distribution</td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=training deep networks for facial expression recognition with crowd-sourced label distribution&sort=relevance">[s2]</a></td><td></td><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td></tr><tr><td>fia</td><td>CMU FiA</td><td>The CMU Face In Action (FIA) Database</td><td>The CMU Face In Action (FIA) Database</td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=the cmu face in action (fia) database&sort=relevance">[s2]</a></td><td></td><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>2D and 3D face recognition: A survey</td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance">[s2]</a></td><td></td><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing Cosegmentation for Shopping Images With Cluttered Background</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance">[s2]</a></td><td></td><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td></tr><tr><td>gavab_db</td><td>Gavab</td><td>GavabDB: a 3D face database</td><td>Expression invariant 3D face recognition with a Morphable Model</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gavabdb: a 3d face database&sort=relevance">[s2]</a></td><td></td><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. 
Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Object recognition using segmentation for feature detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Object recognition using segmentation for feature detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Object Recognition Using Segmentation for Feature Detection</td><td>Object recognition using segmentation for feature detection</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=object recognition using segmentation for feature detection&sort=relevance">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>h3d</td><td>H3D</td><td>Poselets: Body Part Detectors Trained Using 3D Human Pose Annotations</td><td>Poselets: Body part detectors trained using 3D human pose annotations</td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=poselets: body part detectors trained using 3d human pose annotations&sort=relevance">[s2]</a></td><td></td><td>2830fb5282de23d7784b4b4bc37065d27839a412</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>High-resolution comprehensive 3-D dynamic database for facial articulation analysis</td><td><a
href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance">[s2]</a></td><td></td><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td></tr><tr><td>hollywood_headset</td><td>HollywoodHeads</td><td>Context-aware CNNs for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware cnns for person head detection&sort=relevance">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? a preliminary study of face-based recognition on a transgender dataset&sort=relevance">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Face recognition across gender transformation using SVM Classifier</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition across gender transformation using svm classifier&sort=relevance">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td>Iranian Face 
Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database and evaluation with a new detection algorithm&sort=relevance">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td>Automated human identification using ear imaging</td><td>Automated Human Identification Using Ear Imaging</td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=automated human identification using ear imaging&sort=relevance">[s2]</a></td><td></td><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td></tr><tr><td>ijb_a</td><td>IJB-A</td><td>Pushing the Frontiers of Unconstrained Face Detection and Recognition: IARPA Janus Benchmark A</td><td>Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pushing the frontiers of unconstrained face detection and recognition: iarpa janus benchmark a&sort=relevance">[s2]</a></td><td></td><td>140c95e53c619eac594d70f6369f518adfea12ef</td></tr><tr><td>ijb_b</td><td>IJB-B</td><td>IARPA Janus Benchmark-B Face Dataset</td><td>IARPA Janus Benchmark-B Face Dataset</td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark-b face dataset&sort=relevance">[s2]</a></td><td></td><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td></td><td>Imagery Library for Intelligent Detection Systems:
+The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems:
+the i-lids user guide&sort=relevance">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identification by Video Ranking</td><td>Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</td><td><a href="https://doi.org/10.1109/LSP.2016.2574323">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by video ranking&sort=relevance">[s2]</a></td><td></td><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>Deep expectation of real and apparent age from a single image without facial landmarks</td><td>Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</td><td><a href="https://doi.org/10.1007/s11263-016-0940-3">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep expectation of real and apparent age from a single image without facial landmarks&sort=relevance">[s2]</a></td><td></td><td>10195a163ab6348eef37213a46f60a3d87f289c5</td></tr><tr><td>imdb_wiki</td><td>IMDB</td><td>DEX: Deep EXpectation of apparent age from a single image</td><td>DEX: Deep EXpectation of Apparent Age from a Single Image</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=dex: deep expectation of apparent age from a single image&sort=relevance">[s2]</a></td><td></td><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td></tr><tr><td>imfdb</td><td>IMFDB</td><td>Indian Movie Face Database: A Benchmark for Face Recognition Under Wide Variations</td><td>Indian Movie Face Database: A benchmark for face recognition under wide variations</td><td><span class="gray">[pdf]</span></td><td><a href="https://www.semanticscholar.org/search?q=indian movie face database: a benchmark for face recognition under wide variations&sort=relevance">[s2]</a></td><td></td><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td></tr><tr><td>imm_face</td><td>IMM Face Dataset</td><td>The IMM Face Database - An Annotated Dataset of 240 Face Images</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the imm face database - an annotated dataset of 240 face images&sort=relevance">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>immediacy</td><td>Immediacy</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td>Multi-task Recurrent Neural Network for Immediacy Prediction</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-task recurrent neural network for immediacy prediction&sort=relevance">[s2]</a></td><td></td><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td></tr><tr><td>inria_person</td><td>INRIA Pedestrian</td><td>Histograms of Oriented Gradients for Human
Detection</td><td>Histograms of oriented gradients for human detection</td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=histograms of oriented gradients for human detection&sort=relevance">[s2]</a></td><td></td><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>A Synchronization Ground Truth for the Jiku Mobile Video Dataset</td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance">[s2]</a></td><td></td><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td></tr><tr><td>jpl_pose</td><td>JPL-Interaction dataset</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td>First-Person Activity Recognition: What Are They Doing to Me?</td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=first-person activity recognition: what are they doing to me?&sort=relevance">[s2]</a></td><td></td><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td></tr><tr><td>kdef</td><td>KDEF</td><td>The Karolinska Directed Emotional Faces – KDEF</td><td>Gaze fixation and the neural circuitry of face processing in autism</td><td><a href="http://doi.org/10.1038/nn1421">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the karolinska directed emotional faces – kdef&sort=relevance">[s2]</a></td><td></td><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Understanding Kin Relationships in a Photo</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding kin relationships in a photo&sort=relevance">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kinectface</td><td>KinectFaceDB</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td>KinectFaceDB: A Kinect Database for Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinectfacedb: a kinect database for face 
recognition&sort=relevance">[s2]</a></td><td></td><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>The Role of Machine Vision for Intelligent Vehicles</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti dataset&sort=relevance">[s2]</a></td><td></td><td>35ba4ebfd017a56b51e967105af9ae273c9b0178</td></tr><tr><td>lag</td><td>LAG</td><td>Large Age-Gap Face Verification by Feature Injection in Deep Networks</td><td>Large age-gap face verification by feature injection in deep networks</td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=large age-gap face verification by feature injection in deep networks&sort=relevance">[s2]</a></td><td></td><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td></tr><tr><td>large_scale_person_search</td><td>Large Scale Person Search</td><td>End-to-End Deep Learning for Person Search</td><td>End-to-End Deep Learning for Person Search</td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=end-to-end deep learning for person search&sort=relevance">[s2]</a></td><td></td><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td></tr><tr><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td>Learning Effective Human Pose Estimation from Inaccurate Annotation</td><td>Learning effective human pose estimation from inaccurate annotation</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning effective human pose estimation from inaccurate annotation&sort=relevance">[s2]</a></td><td></td><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained 
environments&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw_a</td><td>LFW-a</td><td>Effective Unconstrained Face Recognition by
+ Combining Multiple Descriptors and Learned
+ Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by
+ combining multiple descriptors and learned
+ background statistics&sort=relevance">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mafl</td><td>MAFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mapillary</td><td>Mapillary</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td>The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mapillary vistas dataset for semantic understanding of street scenes&sort=relevance">[s2]</a></td><td></td><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td></tr><tr><td>market_1501</td><td>Market 1501</td><td>Scalable Person Re-identification: A Benchmark</td><td>Scalable Person Re-identification: A Benchmark</td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scalable person re-identification: a benchmark&sort=relevance">[s2]</a></td><td></td><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td></tr><tr><td>market1203</td><td>Market 1203</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>mars</td><td>MARS</td><td>MARS: A Video Benchmark for Large-Scale Person 
Re-identification</td><td>MARS: A Video Benchmark for Large-Scale Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=mars: a video benchmark for large-scale person re-identification&sort=relevance">[s2]</a></td><td></td><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td></tr><tr><td>megaface</td><td>MegaFace</td><td>Level Playing Field for Million Scale Face Recognition</td><td>Level Playing Field for Million Scale Face Recognition</td><td><a href="https://arxiv.org/pdf/1705.00393.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=level playing field for million scale face recognition&sort=relevance">[s2]</a></td><td></td><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td></tr><tr><td>mit_cbcl</td><td>MIT CBCL</td><td>Component-based Face Recognition with 3D Morphable Models</td><td>Component-Based Face Recognition with 3D Morphable Models</td><td><a href="http://www.bheisele.com/avbpa2003.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=component-based face recognition with 3d morphable models&sort=relevance">[s2]</a></td><td></td><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td></tr><tr><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td>WEB-BASED DATABASE FOR FACIAL EXPRESSION ANALYSIS</td><td>Web-based database for facial expression analysis</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=web-based database for facial expression analysis&sort=relevance">[s2]</a></td><td></td><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td></tr><tr><td>moments_in_time</td><td>Moments in Time</td><td>Moments in Time Dataset: one million videos for event understanding</td><td>Moments in Time Dataset: one million videos for event understanding</td><td><a href="https://arxiv.org/pdf/1801.03150.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=moments in time dataset: one million videos for event understanding&sort=relevance">[s2]</a></td><td></td><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td></tr><tr><td>morph</td><td>MORPH Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>morph_nc</td><td>MORPH Non-Commercial</td><td>MORPH: A Longitudinal Image Database of Normal Adult Age-Progression</td><td>MORPH: a longitudinal image database of normal adult age-progression</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=morph: a longitudinal image database of normal adult age-progression&sort=relevance">[s2]</a></td><td></td><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple 
object tracking performance: the clear mot metrics&sort=relevance">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to associate: hybridboosted multi-target tracker for crowded scene&sort=relevance">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mpi_large</td><td>Large MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpi_small</td><td>Small MPI Facial Expression</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td>The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mpi facial expression database — a validated database of emotional and conversational facial expressions&sort=relevance">[s2]</a></td><td></td><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td></tr><tr><td>mpii_human_pose</td><td>MPII Human Pose</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td>2D Human Pose Estimation: New Benchmark and State of the Art Analysis</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=2d human pose estimation: new benchmark and state of the art analysis&sort=relevance">[s2]</a></td><td></td><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mrp_drone</td><td>MRP Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td>Investigating Open-World Person Re-identification Using a Drone</td><td><a 
href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating open-world person re-identification using a drone&sort=relevance">[s2]</a></td><td></td><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td></tr><tr><td>msceleb</td><td>MsCeleb</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td>MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ms-celeb-1m: a dataset and benchmark for large-scale face recognition&sort=relevance">[s2]</a></td><td></td><td>291265db88023e92bb8c8e6390438e5da148e8f5</td></tr><tr><td>msmt_17</td><td>MSMT17</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td>Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</td><td><a href="https://arxiv.org/pdf/1711.08565.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person transfer gan to bridge domain gap for person re-identification&sort=relevance">[s2]</a></td><td></td><td>ec792ad2433b6579f2566c932ee414111e194537</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning deep representation for face alignment with auxiliary attributes&sort=relevance">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>muct</td><td>MUCT</td><td>The MUCT Landmarked Face Database</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the muct landmarked face database&sort=relevance">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</td><td><a href="http://dl.acm.org/citation.cfm?id=2337184">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance">[s2]</a></td><td></td><td>109df0e8e5969ddf01e073143e83599228a1163f</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nd_2006</td><td>ND-2006</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face 
Recognition</td><td>Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=using a multi-instance enrollment representation to improve 3d face recognition&sort=relevance">[s2]</a></td><td></td><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nudedetection</td><td>Nude Detection</td><td>A Bag-of-Features Approach based on Hue-SIFT Descriptor for Nude Detection</td><td>A bag-of-features approach based on Hue-SIFT descriptor for nude detection</td><td><a href="http://ieeexplore.ieee.org/document/7077625/">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a bag-of-features approach based on hue-sift descriptor for nude detection&sort=relevance">[s2]</a></td><td></td><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td></tr><tr><td>orl</td><td>ORL</td><td>Parameterisation of a Stochastic Model for Human Face Identification</td><td>Parameterisation of a stochastic model for human face identification</td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=parameterisation of a stochastic model for human face identification&sort=relevance">[s2]</a></td><td></td><td>55206f0b5f57ce17358999145506cd01e570358c</td></tr><tr><td>penn_fudan</td><td>Penn Fudan</td><td>Object Detection Combining Recognition and Segmentation</td><td>Object Detection Combining Recognition and Segmentation</td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=object detection combining recognition and segmentation&sort=relevance">[s2]</a></td><td></td><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td></tr><tr><td>peta</td><td>PETA</td><td>Pedestrian Attribute Recognition At Far Distance</td><td>Pedestrian Attribute Recognition At Far Distance</td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute recognition at far distance&sort=relevance">[s2]</a></td><td></td><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td></tr><tr><td>pets</td><td>PETS 2017</td><td>PETS 2017: Dataset and Challenge</td><td>PETS 2017: Dataset and Challenge</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pets 2017: dataset and 
challenge&sort=relevance">[s2]</a></td><td></td><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classi cation</td><td>Summary of Research on Informant Accuracy in Network Data, 11 and on the Reverse Small World Problem</td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classi cation&sort=relevance">[s2]</a></td><td></td><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td></tr><tr><td>pipa</td><td>PIPA</td><td>Beyond Frontal Faces: Improving Person Recognition Using Multiple Cues</td><td>Beyond frontal faces: Improving Person Recognition using multiple cues</td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=beyond frontal faces: improving person recognition using multiple cues&sort=relevance">[s2]</a></td><td></td><td>0a85bdff552615643dd74646ac881862a7c7072d</td></tr><tr><td>pku</td><td>PKU</td><td>Swiss-System Based Cascade Ranking for Gait-based Person Re-identification</td><td>Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=swiss-system based cascade ranking for gait-based person re-identification&sort=relevance">[s2]</a></td><td></td><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td></tr><tr><td>pku_reid</td><td>PKU-Reid</td><td>Orientation driven bag of appearances for person re-identification</td><td>Orientation Driven Bag of Appearances for Person Re-identification</td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=orientation driven bag of appearances for person re-identification&sort=relevance">[s2]</a></td><td></td><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td></tr><tr><td>pornodb</td><td>Pornography DB</td><td>Pooling in Image Representation: the Visual Codeword Point of View</td><td>Pooling in image representation: The visual codeword point of view</td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pooling in image representation: the visual codeword point of view&sort=relevance">[s2]</a></td><td></td><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td></tr><tr><td>precarious</td><td>Precarious</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians With Adversarial Imposters</td><td>Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</td><td><a href="https://arxiv.org/pdf/1703.06283.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=expecting the unexpected: training detectors for unusual pedestrians with adversarial imposters&sort=relevance">[s2]</a></td><td></td><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td></tr><tr><td>prid</td><td>PRID</td><td>Person Re-Identification by Descriptive and Discriminative Classification</td><td>Person Re-identification by Descriptive and Discriminative Classification</td><td><a 
href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identification by descriptive and discriminative classification&sort=relevance">[s2]</a></td><td></td><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td></tr><tr><td>psu</td><td>PSU</td><td>Vision-based Analysis of Small Groups in Pedestrian Crowds</td><td>Vision-Based Analysis of Small Groups in Pedestrian Crowds</td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision-based analysis of small groups in pedestrian crowds&sort=relevance">[s2]</a></td><td></td><td>066000d44d6691d27202896691f08b27117918b9</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Time-delayed correlation analysis for multi-camera activity understanding</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=time-delayed correlation analysis for multi-camera activity understanding&sort=relevance">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td>Surveillance Face Recognition Challenge</td><td>Surveillance Face Recognition Challenge</td><td><a href="https://arxiv.org/pdf/1804.09691.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=surveillance face recognition challenge&sort=relevance">[s2]</a></td><td></td><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td></tr><tr><td>rafd</td><td>RaFD</td><td>Presentation and validation of the Radboud Faces Database</td><td>Presentation and validation of the Radboud Faces Database</td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=presentation and validation of the radboud faces database&sort=relevance">[s2]</a></td><td></td><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td></tr><tr><td>raid</td><td>RAiD</td><td>Consistent Re-identification in a Camera Network</td><td>Consistent Re-identification in a Camera Network</td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=consistent re-identification in a camera network&sort=relevance">[s2]</a></td><td></td><td>09d78009687bec46e70efcf39d4612822e61cb8c</td></tr><tr><td>rap_pedestrian</td><td>RAP</td><td>A Richly Annotated Dataset for Pedestrian 
Attribute Recognition</td><td>A Richly Annotated Dataset for Pedestrian Attribute Recognition</td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a richly annotated dataset for pedestrian attribute recognition&sort=relevance">[s2]</a></td><td></td><td>221c18238b829c12b911706947ab38fd017acef7</td></tr><tr><td>reseed</td><td>ReSEED</td><td>ReSEED: Social Event dEtection Dataset</td><td>ReSEED: social event dEtection dataset</td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=reseed: social event detection dataset&sort=relevance">[s2]</a></td><td></td><td>54983972aafc8e149259d913524581357b0f91c3</td></tr><tr><td>saivt</td><td>SAIVT SoftBio</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td>A Database for Person Re-Identification in Multi-Camera Surveillance Networks</td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a database for person re-identification in multi-camera surveillance networks&sort=relevance">[s2]</a></td><td></td><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td></tr><tr><td>sarc3d</td><td>Sarc3D</td><td>SARC3D: a new 3D body model for People Tracking and Re-identification</td><td>SARC3D: A New 3D Body Model for People Tracking and Re-identification</td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sarc3d: a new 3d body model for people tracking and re-identification&sort=relevance">[s2]</a></td><td></td><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>Large Variability Surveillance Camera Face Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance">[s2]</a></td><td></td><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td></tr><tr><td>scut_head</td><td>SCUT HEAD</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td>Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=detecting heads using feature refine net and cascaded multi-scale architecture&sort=relevance">[s2]</a></td><td></td><td>dfdcd8c7c91813ba1624c9a21d2d01ef06a49afd</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sheffield</td><td>Sheffield Face</td><td>Face Recognition: From Theory to Applications</td><td>Face Description with Local Binary Patterns: Application to Face Recognition</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition: from theory to applications&sort=relevance">[s2]</a></td><td></td><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td></tr><tr><td>soton</td><td>SOTON HiD</td><td>On a Large Sequence-Based Human Gait Database</td><td>On a large sequence-based human gait database</td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=on a large sequence-based human gait database&sort=relevance">[s2]</a></td><td></td><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td></tr><tr><td>stair_actions</td><td>STAIR Action</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td>STAIR Actions: A Video Dataset of Everyday Home Actions</td><td><a href="https://arxiv.org/pdf/1804.04326.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stair actions: a video dataset of everyday home actions&sort=relevance">[s2]</a></td><td></td><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Learning to Predict Human Behavior in Crowded Scenes</td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance">[s2]</a></td><td></td><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose 
estimation&sort=relevance">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_family</td><td>We Are Family Stickmen</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td>We Are Family: Joint Pose Estimation of Multiple Persons</td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=we are family: joint pose estimation of multiple persons&sort=relevance">[s2]</a></td><td></td><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database:
+Discovering, Annotating, and Recognizing Scene Attributes</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sun attribute database:
+discovering, annotating, and recognizing scene attributes&sort=relevance">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>svs</td><td>SVS</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td>Pedestrian Attribute Classification in Surveillance: Database and Evaluation</td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian attribute classification in surveillance: database and evaluation&sort=relevance">[s2]</a></td><td></td><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Anthropometric 3D Face Recognition</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=anthropometric 3d face recognition&sort=relevance">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>tiny_faces</td><td>TinyFace</td><td>Low-Resolution Face Recognition</td><td>Low-Resolution Face Recognition</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=low-resolution face recognition&sort=relevance">[s2]</a></td><td></td><td>8990cdce3f917dad622e43e033db686b354d057c</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>towncenter</td><td>TownCenter</td><td>Stable Multi-Target Tracking in Real-Time Surveillance Video</td><td>Stable multi-target tracking in real-time surveillance video</td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=stable multi-target tracking in real-time surveillance video&sort=relevance">[s2]</a></td><td></td><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td></tr><tr><td>tud_brussels</td><td>TUD-Brussels</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_campus</td><td>TUD-Campus</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and 
people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_crossing</td><td>TUD-Crossing</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_motionpairs</td><td>TUD-Motionparis</td><td>Multi-Cue Onboard Pedestrian Detection</td><td>Multi-cue onboard pedestrian detection</td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-cue onboard pedestrian detection&sort=relevance">[s2]</a></td><td></td><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td></tr><tr><td>tud_multiview</td><td>TUD-Multiview</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td>People-Tracking-by-Detection and People-Detection-by-Tracking</td><td>People-tracking-by-detection and people-detection-by-tracking</td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=people-tracking-by-detection and people-detection-by-tracking&sort=relevance">[s2]</a></td><td></td><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td></tr><tr><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td>Monocular 3D Pose Estimation and Tracking by Detection</td><td>Monocular 3D pose estimation and tracking by detection</td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=monocular 3d pose estimation and tracking by detection&sort=relevance">[s2]</a></td><td></td><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td></tr><tr><td>tvhi</td><td>TVHI</td><td>High Five: Recognising human interactions in TV shows</td><td>High Five: Recognising human interactions in TV shows</td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=high five: recognising human interactions in tv shows&sort=relevance">[s2]</a></td><td></td><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td></tr><tr><td>ucf_crowd</td><td>UCF-CC-50</td><td>Multi-Source Multi-Scale Counting in Extremely Dense Crowd Images</td><td>Multi-source Multi-scale Counting in Extremely Dense Crowd Images</td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=multi-source multi-scale counting in extremely dense crowd images&sort=relevance">[s2]</a></td><td></td><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td></tr><tr><td>ucf_selfie</td><td>UCF Selfie</td><td>How to Take a Good Selfie?</td><td>How to Take a Good Selfie?</td><td><a href="http://doi.acm.org/10.1145/2733373.2806365">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=how to take a good selfie?&sort=relevance">[s2]</a></td><td></td><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td></tr><tr><td>umb</td><td>UMB</td><td>UMB-DB: A Database of Partially Occluded 3D Faces</td><td>UMB-DB: A database of partially occluded 3D faces</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umb-db: a database of partially occluded 3d faces&sort=relevance">[s2]</a></td><td></td><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td></tr><tr><td>umd_faces</td><td>UMD</td><td>UMDFaces: An Annotated Face Dataset for Training Deep Networks</td><td>UMDFaces: An annotated face dataset for training deep networks</td><td><a href="http://arxiv.org/abs/1611.01484">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=umdfaces: an annotated face dataset for training deep networks&sort=relevance">[s2]</a></td><td></td><td>31b05f65405534a696a847dd19c621b7b8588263</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td>PAINFUL DATA: The UNBC-McMaster Shoulder Pain Expression Archive Database</td><td>Painful data: The UNBC-McMaster shoulder pain expression archive database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=painful data: the unbc-mcmaster shoulder pain expression archive database&sort=relevance">[s2]</a></td><td></td><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td></tr><tr><td>used</td><td>USED Social Event Dataset</td><td>USED: A Large-scale Social Event Detection Dataset</td><td>USED: a large-scale social event detection dataset</td><td><a href="http://doi.acm.org/10.1145/2910017.2910624">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=used: a large-scale social event detection dataset&sort=relevance">[s2]</a></td><td></td><td>8627f019882b024aef92e4eb9355c499c733e5b7</td></tr><tr><td>v47</td><td>V47</td><td>Re-identification of Pedestrians with Variable Occlusion and Scale</td><td>Re-identification of pedestrians with variable occlusion and scale</td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identification of pedestrians with variable occlusion and scale&sort=relevance">[s2]</a></td><td></td><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td></tr><tr><td>vadana</td><td>VADANA</td><td>VADANA: A dense dataset for facial image analysis</td><td>VADANA: A dense dataset for facial image analysis</td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vadana: a dense dataset 
for facial image analysis&sort=relevance">[s2]</a></td><td></td><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td></tr><tr><td>vgg_faces</td><td>VGG Face</td><td>Deep Face Recognition</td><td>Deep Face Recognition</td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep face recognition&sort=relevance">[s2]</a></td><td></td><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td></tr><tr><td>violent_flows</td><td>Violent Flows</td><td>Violent Flows: Real-Time Detection of Violent Crowd Behavior</td><td>Violent flows: Real-time detection of violent crowd behavior</td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=violent flows: real-time detection of violent crowd behavior&sort=relevance">[s2]</a></td><td></td><td>5194cbd51f9769ab25260446b4fa17204752e799</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>visual_phrases</td><td>Phrasal Recognition</td><td>Recognition using Visual Phrases</td><td>Recognition using visual phrases</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=recognition using visual phrases&sort=relevance">[s2]</a></td><td></td><td>e8de844fefd54541b71c9823416daa238be65546</td></tr><tr><td>vmu</td><td>VMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Can facial cosmetics affect the matching accuracy of face recognition systems?</td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance">[s2]</a></td><td></td><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes Challenge: A Retrospective</td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance">[s2]</a></td><td></td><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td></tr><tr><td>vqa</td><td>VQA</td><td>VQA: Visual Question Answering</td><td>VQA: Visual Question Answering</td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vqa: visual question answering&sort=relevance">[s2]</a></td><td></td><td>01959ef569f74c286956024866c1d107099199f7</td></tr><tr><td>ward</td><td>WARD</td><td>Re-identify people in wide area camera network</td><td>Re-identify people in wide area camera network</td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=re-identify people in wide area camera 
network&sort=relevance">[s2]</a></td><td></td><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? approaches to mapping facial appearance diversity&sort=relevance">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>wlfdb</td><td></td><td>WLFDB: Weakly Labeled Face Databases</td><td>WLFDB: Weakly Labeled Face Databases</td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wlfdb: weakly labeled face databases&sort=relevance">[s2]</a></td><td></td><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from few to many: illumination cone models for face recognition under variable lighting and pose&sort=relevance">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yawdd</td><td>YawDD</td><td>YawDD: A Yawning Detection Dataset</td><td>YawDD: a yawning detection dataset</td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yawdd: a yawning detection dataset&sort=relevance">[s2]</a></td><td></td><td>a94cae786d515d3450d48267e12ca954aab791c4</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>The New Data and New Challenges in Multimedia Research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance">[s2]</a></td><td></td><td>a6e695ddd07aad719001c0fc1129328452385949</td></tr><tr><td>york_3d</td><td>UOY 3D Face Database</td><td>Three-Dimensional Face Recognition: An 
Eigensurface Approach</td><td>Three-dimensional face recognition: an eigensurface approach</td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=three-dimensional face recognition: an eigensurface approach&sort=relevance">[s2]</a></td><td></td><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td></tr><tr><td>youtube_poses</td><td>YouTube Pose</td><td>Personalizing Human Video Pose Estimation</td><td>Personalizing Human Video Pose Estimation</td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=personalizing human video pose estimation&sort=relevance">[s2]</a></td><td></td><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td></tr></table></body></html> \ No newline at end of file
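The report above (and the nonmatching report added below) pairs each dataset's recorded paper title with the title Semantic Scholar returns; rows like `images_of_groups` differ only in word order, while others (e.g. `pilot_parliament`'s "Gender Shades ... Gender Classi cation") reflect the "fi" ligature being dropped during PDF text extraction. As a minimal sketch of how such rows might be split into matching and nonmatching, assuming a normalized exact comparison with a fuzzy fallback — `normalize_title`, `titles_match`, and the 0.9 threshold are hypothetical illustrations, not the actual logic in `s2-final-report.py`:

```
# Hypothetical sketch (stdlib only): one way to decide whether "our title"
# and the S2 "found title" belong in paper_title_report_nonmatching.html.
import re
import unicodedata
from difflib import SequenceMatcher

def normalize_title(title):
    """Lowercase, strip accents and punctuation, collapse whitespace."""
    title = unicodedata.normalize('NFKD', title)
    title = title.encode('ascii', 'ignore').decode('ascii')
    title = re.sub(r'[^a-z0-9 ]', ' ', title.lower())
    return re.sub(r'\s+', ' ', title).strip()

def titles_match(ours, found, threshold=0.9):
    """True if the normalized titles are equal or nearly so."""
    a, b = normalize_title(ours), normalize_title(found)
    if a == b:
        return True
    # Tolerate small extraction artifacts such as dropped ligatures
    # or case/punctuation differences, but not reordered words.
    return SequenceMatcher(None, a, b).ratio() >= threshold

# Rows taken from the report above:
print(titles_match("Understanding Groups of Images of People",
                   "Understanding images of groups of people"))
# False -> word order differs, row lands in the nonmatching report

print(titles_match("MARS: A Video Benchmark for Large-Scale Person\nRe-identification",
                   "MARS: A Video Benchmark for Large-Scale Person Re-Identification"))
# True -> embedded newline and hyphen casing normalize away
```

Under these assumptions, near-miss titles caused purely by PDF extraction noise stay out of the nonmatching report, while genuinely different papers (wrong search hit, renamed publication) surface for manual review.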
diff --git a/scraper/reports/paper_title_report_nonmatching.html b/scraper/reports/paper_title_report_nonmatching.html
new file mode 100644
index 00000000..a9651307
--- /dev/null
+++ b/scraper/reports/paper_title_report_nonmatching.html
@@ -0,0 +1,9 @@
+<!doctype html><html><head><meta charset='utf-8'><title>Paper Titles that do not match</title><link rel='stylesheet' href='reports.css'></head><body><h2>Paper Titles that do not match</h2><table border='1' cellpadding='3' cellspacing='3'><th>key</th><th>name</th><th>our title</th><th>found title</th><th></th><th></th><th>address</th><th>s2 id</th><tr><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td>The intrinsic memorability of face images</td><td>The intrinsic memorability of face photographs.</td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the intrinsic memorability of face images&sort=relevance">[s2]</a></td><td></td><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td></tr><tr><td>3dddb_unconstrained</td><td>3D Dynamic</td><td>A 3D Dynamic Database for Unconstrained Face Recognition</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a 3d dynamic database for unconstrained face recognition&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>afad</td><td>AFAD</td><td>Ordinal Regression with a Multiple Output CNN for Age Estimation</td><td>Ordinal Regression with Multiple Output CNN for Age Estimation</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=ordinal regression with a multiple output cnn for age estimation&sort=relevance">[s2]</a></td><td></td><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td></tr><tr><td>afew_va</td><td>AFEW-VA</td><td>AFEW-VA database for valence and arousal estimation in-the-wild</td><td>Collecting Large, Richly Annotated Facial-Expression Databases from Movies</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=afew-va database for valence and arousal estimation in-the-wild&sort=relevance">[s2]</a></td><td>Australian National University</td><td>b1f4423c227fa37b9680787be38857069247a307</td></tr><tr><td>affectnet</td><td>AffectNet</td><td>AffectNet: A New Database for Facial Expression, Valence, and Arousal Computation in the Wild</td><td>Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</td><td><a href="http://dl.acm.org/citation.cfm?id=3232665">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectnet: a new database for facial expression, valence, and arousal computation in the wild&sort=relevance">[s2]</a></td><td></td><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td></tr><tr><td>afw</td><td>AFW</td><td>Face detection, pose estimation and landmark localization in the wild</td><td>Face detection, pose estimation, and landmark localization in the wild</td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face detection, pose estimation and landmark localization in the wild&sort=relevance">[s2]</a></td><td>University of California, Irvine</td><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td></tr><tr><td>alert_airport</td><td>ALERT Airport</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, 
Metrics, and Datasets</td><td>A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</td><td><a href="https://arxiv.org/pdf/1605.09653.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a systematic evaluation and benchmark for person re-identification: features, metrics, and datasets&sort=relevance">[s2]</a></td><td></td><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td></tr><tr><td>am_fed</td><td>AM-FED</td><td>Affectiva MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected “In the Wild”</td><td>Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=affectiva mit facial expression dataset (am-fed): naturalistic and spontaneous facial expressions collected “in the wild”&sort=relevance">[s2]</a></td><td></td><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td></tr><tr><td>ar_facedb</td><td>AR Face</td><td>The AR Face Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the ar face database&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>bjut_3d</td><td>BJUT-3D</td><td>The BJUT-3D Large-Scale Chinese Face Database</td><td>A novel face recognition method based on 3D face model</td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the bjut-3d large-scale chinese face database&sort=relevance">[s2]</a></td><td></td><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td></tr><tr><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td>A high resolution spontaneous 3D dynamic facial expression database</td><td>A high-resolution spontaneous 3D dynamic facial expression database</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a high resolution spontaneous 3d dynamic facial expression database&sort=relevance">[s2]</a></td><td>SUNY Binghamton</td><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td></tr><tr><td>brainwash</td><td>Brainwash</td><td>Brainwash dataset</td><td>Brainwash: A Data System for Feature Engineering</td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=brainwash dataset&sort=relevance">[s2]</a></td><td></td><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td></tr><tr><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td>Pedestrian Detection: A Benchmark</td><td>Pedestrian Detection: An Evaluation of the State of the Art</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=pedestrian detection: a benchmark&sort=relevance">[s2]</a></td><td></td><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td></tr><tr><td>camel</td><td>CAMEL</td><td>CAMEL Dataset for Visual and Thermal Infrared Multiple Object Detection and Tracking</td><td>Application of Object Based Classification and High Resolution Satellite Imagery for 
Savanna Ecosystem Analysis</td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=camel dataset for visual and thermal infrared multiple object detection and tracking&sort=relevance">[s2]</a></td><td></td><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td></tr><tr><td>casablanca</td><td>Casablanca</td><td>Context-aware {CNNs} for person head detection</td><td>Context-Aware CNNs for Person Head Detection</td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=context-aware {cnns} for person head detection&sort=relevance">[s2]</a></td><td></td><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td></tr><tr><td>celeba_plus</td><td>CelebFaces+</td><td>Deep Learning Face Representation from Predicting 10,000 Classes</td><td>Learning Deep Representation for Imbalanced Classification</td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deep learning face representation from predicting 10,000 classes&sort=relevance">[s2]</a></td><td>Shenzhen Institutes of Advanced Technology</td><td>69a68f9cf874c69e2232f47808016c2736b90c35</td></tr><tr><td>cfd</td><td>CFD</td><td>The Chicago face database: A free stimulus set of faces and norming data</td><td>The Chicago face database: A free stimulus set of faces and norming data.</td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the chicago face database: a free stimulus set of faces and norming data&sort=relevance">[s2]</a></td><td></td><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td></tr><tr><td>cityscapes</td><td>Cityscapes</td><td>The Cityscapes Dataset</td><td>The Cityscapes Dataset for Semantic Urban Scene Understanding</td><td><a href="https://arxiv.org/pdf/1604.01685.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cityscapes dataset&sort=relevance">[s2]</a></td><td></td><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td></tr><tr><td>cmu_pie</td><td>CMU PIE</td><td>The CMU Pose, Illumination, and Expression Database</td><td>The CMU Pose, Illumination, and Expression (PIE) Database</td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the cmu pose, illumination, and expression database&sort=relevance">[s2]</a></td><td></td><td>4d423acc78273b75134e2afd1777ba6d3a398973</td></tr><tr><td>coco</td><td>COCO</td><td>Microsoft COCO: Common Objects in Context</td><td>Microsoft COCO Captions: Data Collection and Evaluation Server</td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=microsoft coco: common objects in context&sort=relevance">[s2]</a></td><td></td><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td></tr><tr><td>columbia_gaze</td><td>Columbia Gaze</td><td>Gaze Locking: Passive Eye Contact Detection for Human–Object Interaction</td><td>A 3D Morphable Eye Region Model for Gaze Estimation</td><td><a href="https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gaze locking: passive eye contact detection for human–object 
interaction&sort=relevance">[s2]</a></td><td>Carnegie Mellon University</td><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td></tr><tr><td>cvc_01_barcelona</td><td>CVC-01</td><td>Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</td><td>Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=adaptive image sampling and windows classification for on-board pedestrian detection&sort=relevance">[s2]</a></td><td></td><td>57fe081950f21ca03b5b375ae3e84b399c015861</td></tr><tr><td>d3dfacs</td><td>D3DFACS</td><td>A FACS Valid 3D Dynamic Action Unit database with Applications to 3D Dynamic Morphable Facial Modelling</td><td>A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a facs valid 3d dynamic action unit database with applications to 3d dynamic morphable facial modelling&sort=relevance">[s2]</a></td><td>Jacobs University</td><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td></tr><tr><td>dartmouth_children</td><td>Dartmouth Children</td><td>The Dartmouth Database of Children's Faces: Acquisition and validation of a new face stimulus set</td><td>The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the dartmouth database of children's faces: acquisition and validation of a new face stimulus set&sort=relevance">[s2]</a></td><td></td><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td></tr><tr><td>deep_fashion</td><td>DeepFashion</td><td>DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</td><td>Fashion Landmark Detection in the Wild</td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=deepfashion: powering robust clothes recognition and retrieval with rich annotations&sort=relevance">[s2]</a></td><td>Chinese University of Hong Kong</td><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td></tr><tr><td>disfa</td><td>DISFA</td><td>DISFA: A Spontaneous Facial Action Intensity Database</td><td>Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=disfa: a spontaneous facial action intensity database&sort=relevance">[s2]</a></td><td></td><td>a5acda0e8c0937bfed013e6382da127103e41395</td></tr><tr><td>expw</td><td>ExpW</td><td>Learning Social Relation Traits from Face Images</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social relation traits from face images&sort=relevance">[s2]</a></td><td></td><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td></tr><tr><td>face_research_lab</td><td>Face Research Lab London</td><td>Face Research Lab London Set. 
figshare</td><td>Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face research lab london set. figshare&sort=relevance">[s2]</a></td><td>University College London</td><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td></tr><tr><td>face_tracer</td><td>FaceTracer</td><td>FaceTracer: A Search Engine for Large Collections of Images with Faces</td><td>Face swapping: automatically replacing faces in photographs</td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facetracer: a search engine for large collections of images with faces&sort=relevance">[s2]</a></td><td>Columbia University</td><td>670637d0303a863c1548d5b19f705860a23e285c</td></tr><tr><td>fddb</td><td>FDDB</td><td>FDDB: A Benchmark for Face Detection in Unconstrained Settings</td><td>A Benchmark for Face Detection in Unconstrained Settings</td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fddb: a benchmark for face detection in unconstrained settings&sort=relevance">[s2]</a></td><td>University of Massachusetts</td><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td></tr><tr><td>fei</td><td>FEI</td><td>Captura e Alinhamento de Imagens: Um Banco de Faces Brasileiro</td><td>Generalização cartográfica automatizada para um banco de dados cadastral</td><td><a href="https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=captura e alinhamento de imagens: um banco de faces brasileiro&sort=relevance">[s2]</a></td><td></td><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 faces In-the-wild challenge: Database and results</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: database and results&sort=relevance">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>fiw_300</td><td>300-W</td><td>300 Faces in-the-Wild Challenge: The first facial landmark localization Challenge</td><td>A Semi-automatic Methodology for Facial Landmark Annotation</td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=300 faces in-the-wild challenge: the first facial landmark localization challenge&sort=relevance">[s2]</a></td><td>University of Twente</td><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td></tr><tr><td>frav3d</td><td>FRAV3D</td><td>MULTIMODAL 2D, 2.5D & 3D FACE VERIFICATION</td><td>2D and 3D face recognition: A survey</td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multimodal 2d, 2.5d & 3d face verification&sort=relevance">[s2]</a></td><td></td><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td></tr><tr><td>frgc</td><td>FRGC</td><td>Overview of the Face Recognition Grand Challenge</td><td>Overview of the face recognition grand challenge</td><td><a 
href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=overview of the face recognition grand challenge&sort=relevance">[s2]</a></td><td></td><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td></tr><tr><td>gallagher</td><td>Gallagher</td><td>Clothing Cosegmentation for Recognizing People</td><td>Clothing Cosegmentation for Shopping Images With Cluttered Background</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clothing cosegmentation for recognizing people&sort=relevance">[s2]</a></td><td></td><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td></tr><tr><td>gavab_db</td><td>Gavab</td><td>GavabDB: a 3D face database</td><td>Expression invariant 3D face recognition with a Morphable Model</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gavabdb: a 3d face database&sort=relevance">[s2]</a></td><td></td><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td></tr><tr><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td>Maximum likelihood training of the embedded HMM for face detection and recognition</td><td>MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=maximum likelihood training of the embedded hmm for face detection and recognition&sort=relevance">[s2]</a></td><td></td><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Generic Object Recognition with Boosting</td><td>Object recognition using segmentation for feature detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=generic object recognition with boosting&sort=relevance">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>graz</td><td>Graz Pedestrian</td><td>Weak Hypotheses and Boosting for Generic Object Detection and Recognition</td><td>Object recognition using segmentation for feature detection</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=weak hypotheses and boosting for generic object detection and recognition&sort=relevance">[s2]</a></td><td></td><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>The HDA+ data set for research on fully automated re-identification systems</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the hda+ data set for research on fully automated re-identification systems&sort=relevance">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hda_plus</td><td>HDA+</td><td>A Multi-camera video data set for research on High-Definition surveillance</td><td>HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td><a 
href="https://www.semanticscholar.org/search?q=a multi-camera video data set for research on high-definition surveillance&sort=relevance">[s2]</a></td><td></td><td>bd88bb2e4f351352d88ee7375af834360e223498</td></tr><tr><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td>Hi4D-ADSIP 3-D dynamic facial articulation database</td><td>High-resolution comprehensive 3-D dynamic database for facial articulation analysis</td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hi4d-adsip 3-d dynamic facial articulation database&sort=relevance">[s2]</a></td><td></td><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Is the Eye Region More Reliable Than the Face? A Preliminary Study of Face-based Recognition on a Transgender Dataset</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=is the eye region more reliable than the face? a preliminary study of face-based recognition on a transgender dataset&sort=relevance">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Investigating the Periocular-Based Face Recognition Across Gender Transformation</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=investigating the periocular-based face recognition across gender transformation&sort=relevance">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>hrt_transgender</td><td>HRT Transgender</td><td>Face recognition across gender transformation using SVM Classifier</td><td>Face recognition: A literature survey</td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition across gender transformation using svm classifier&sort=relevance">[s2]</a></td><td></td><td>28312c3a47c1be3a67365700744d3d6665b86f22</td></tr><tr><td>ifad</td><td>IFAD</td><td>Indian Face Age Database: A Database for Face Recognition with Age Variation</td><td>Indian Face Age Database : A Database for Face Recognition with Age Variation</td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=indian face age database: a database for face recognition with age variation&sort=relevance">[s2]</a></td><td></td><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td></tr><tr><td>ifdb</td><td>IFDB</td><td>Iranian Face Database with age, pose and expression</td><td>Iranian Face Database and Evaluation with a New Detection Algorithm</td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iranian face database with age, pose and expression&sort=relevance">[s2]</a></td><td></td><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td></tr><tr><td>ijb_c</td><td>IJB-C</td><td>IARPA Janus Benchmark C</td><td>IARPA Janus Benchmark - C: Face Dataset and 
Protocol</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=iarpa janus benchmark c&sort=relevance">[s2]</a></td><td></td><td>57178b36c21fd7f4529ac6748614bb3374714e91</td></tr><tr><td>ilids_mcts</td><td></td><td>Imagery Library for Intelligent Detection Systems:
+The i-LIDS User Guide</td><td>Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=imagery library for intelligent detection systems:
+the i-lids user guide&sort=relevance">[s2]</a></td><td></td><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td></tr><tr><td>ilids_vid_reid</td><td>iLIDS-VID</td><td>Person Re-Identi cation by Video Ranking</td><td>Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</td><td><a href="https://doi.org/10.1109/LSP.2016.2574323">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person re-identi cation by video ranking&sort=relevance">[s2]</a></td><td></td><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td></tr><tr><td>images_of_groups</td><td>Images of Groups</td><td>Understanding Groups of Images of People</td><td>Understanding images of groups of people</td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=understanding groups of images of people&sort=relevance">[s2]</a></td><td></td><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td></tr><tr><td>imm_face</td><td>IMM Face Dataset</td><td>The IMM Face Database - An Annotated Dataset of 240 Face Images</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the imm face database - an annotated dataset of 240 face images&sort=relevance">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td>The Jiku Mobile Video Dataset</td><td>A Synchronization Ground Truth for the Jiku Mobile Video Dataset</td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the jiku mobile video dataset&sort=relevance">[s2]</a></td><td></td><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td></tr><tr><td>kdef</td><td>KDEF</td><td>The Karolinska Directed Emotional Faces – KDEF</td><td>Gaze fixation and the neural circuitry of face processing in autism</td><td><a href="http://doi.org/10.1038/nn1421">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the karolinska directed emotional faces – kdef&sort=relevance">[s2]</a></td><td></td><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Genealogical Face Recognition based on UB KinFace Database</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=genealogical face recognition based on ub kinface database&sort=relevance">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kin_face</td><td>UB KinFace</td><td>Kinship Verification through Transfer Learning</td><td>Understanding Kin Relationships in a Photo</td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=kinship verification through transfer learning&sort=relevance">[s2]</a></td><td></td><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td></tr><tr><td>kitti</td><td>KITTI</td><td>Vision meets Robotics: The KITTI Dataset</td><td>The Role of Machine Vision for Intelligent Vehicles</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=vision meets robotics: the kitti 
dataset&sort=relevance">[s2]</a></td><td></td><td>35ba4ebfd017a56b51e967105af9ae273c9b0178</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Survey</td><td>Labeled Faces in the Wild : A Survey</td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a survey&sort=relevance">[s2]</a></td><td></td><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: a database for studying face recognition in unconstrained environments&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>lfw</td><td>LFW</td><td>Labeled Faces in the Wild: Updates and New Reporting Procedures</td><td>Labeled Faces in the Wild : Updates and New Reporting Procedures</td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=labeled faces in the wild: updates and new reporting procedures&sort=relevance">[s2]</a></td><td>University of Massachusetts</td><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td></tr><tr><td>lfw_a</td><td>LFW-a</td><td>Effective Unconstrained Face Recognition by
+ Combining Multiple Descriptors and Learned
+ Background Statistics</td><td>Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=effective unconstrained face recognition by
+ combining multiple descriptors and learned
+ background statistics&sort=relevance">[s2]</a></td><td></td><td>133f01aec1534604d184d56de866a4bd531dac87</td></tr><tr><td>m2vts</td><td>m2vts</td><td>The M2VTS Multimodal Face Database (Release 1.00)</td><td>The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the m2vts multimodal face database (release 1.00)&sort=relevance">[s2]</a></td><td></td><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td></tr><tr><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td>XM2VTSDB: The Extended M2VTS Database</td><td>Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=xm2vtsdb: the extended m2vts database&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>mafl</td><td>MAFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>malf</td><td>MALF</td><td>Fine-grained Evaluation on Face Detection in the Wild.</td><td>Fine-grained evaluation on face detection in the wild</td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=fine-grained evaluation on face detection in the wild.&sort=relevance">[s2]</a></td><td>Chinese Academy of Sciences</td><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td></tr><tr><td>mcgill</td><td>McGill Real World</td><td>Hierarchical Temporal Graphical Model for Head Pose Estimation and Subsequent Attribute Classification in Real-World Videos</td><td>Robust semi-automatic head pose labeling for real-world face video sequences</td><td><a href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos&sort=relevance">[s2]</a></td><td>McGill University</td><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td></tr><tr><td>mot</td><td>MOT</td><td>Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating multiple object tracking performance: the clear mot metrics&sort=relevance">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mot</td><td>MOT</td><td>Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</td><td>Learning to associate: HybridBoosted multi-target tracker for crowded scene</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=performance measures and a data set for multi-target, multi-camera tracking&sort=relevance">[s2]</a></td><td></td><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td></tr><tr><td>mr2</td><td>MR2</td><td>The MR2: A multi-racial mega-resolution database of facial 
stimuli</td><td>The MR2: A multi-racial, mega-resolution database of facial stimuli.</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the mr2: a multi-racial mega-resolution database of facial stimuli&sort=relevance">[s2]</a></td><td></td><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td></tr><tr><td>mtfl</td><td>MTFL</td><td>Facial Landmark Detection by Deep Multi-task Learning</td><td>Learning Deep Representation for Face Alignment with Auxiliary Attributes</td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=facial landmark detection by deep multi-task learning&sort=relevance">[s2]</a></td><td></td><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td></tr><tr><td>muct</td><td>MUCT</td><td>The MUCT Landmarked Face Database</td><td>Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the muct landmarked face database&sort=relevance">[s2]</a></td><td></td><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td></tr><tr><td>multi_pie</td><td>MULTIPIE</td><td>Multi-PIE</td><td>Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</td><td><a href="http://dl.acm.org/citation.cfm?id=2337184">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-pie&sort=relevance">[s2]</a></td><td></td><td>109df0e8e5969ddf01e073143e83599228a1163f</td></tr><tr><td>names_and_faces_news</td><td>News Dataset</td><td>Names and Faces</td><td>Names and faces in the news</td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=names and faces&sort=relevance">[s2]</a></td><td></td><td>2fda164863a06a92d3a910b96eef927269aeb730</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Crowdsourcing facial expressions for affective-interaction</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=crowdsourcing facial expressions for affective-interaction&sort=relevance">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>nova_emotions</td><td>Novaemötions Dataset</td><td>Competitive affective gamming: Winning with a smile</td><td>Competitive affective gaming: winning with a smile</td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=competitive affective gamming: winning with a smile&sort=relevance">[s2]</a></td><td></td><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td></tr><tr><td>pilot_parliament</td><td>PPB</td><td>Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classi cation</td><td>Summary of Research on Informant Accuracy in Network Data, 11 and on the Reverse Small World Problem</td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=gender shades: intersectional accuracy disparities in commercial gender classi cation&sort=relevance">[s2]</a></td><td></td><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td></tr><tr><td>put_face</td><td>Put Face</td><td>The PUT face database</td><td>Labeled 
Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the put face database&sort=relevance">[s2]</a></td><td></td><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td></tr><tr><td>qmul_grid</td><td>GRID</td><td>Multi-Camera Activity Correlation Analysis</td><td>Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=multi-camera activity correlation analysis&sort=relevance">[s2]</a></td><td></td><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td></tr><tr><td>scface</td><td>SCface</td><td>SCface – surveillance cameras face database</td><td>Large Variability Surveillance Camera Face Database</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=scface – surveillance cameras face database&sort=relevance">[s2]</a></td><td></td><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=a spatio-temporal appearance representation for video-based pedestrian re-identification&sort=relevance">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Local descriptors encoded by Fisher vectors for person re-identification</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=local descriptors encoded by fisher vectors for person re-identification&sort=relevance">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sdu_vid</td><td>SDU-VID</td><td>Person reidentification by video ranking</td><td>Person Re-identification by Video Ranking</td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=person reidentification by video ranking&sort=relevance">[s2]</a></td><td></td><td>98bb029afe2a1239c3fdab517323066f0957b81b</td></tr><tr><td>sheffield</td><td>Sheffield Face</td><td>Face Recognition: From Theory to Applications</td><td>Face Description with Local Binary Patterns: Application to Face Recognition</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=face recognition: from theory to applications&sort=relevance">[s2]</a></td><td></td><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td></tr><tr><td>social_relation</td><td>Social Relation</td><td>From Facial Expression Recognition to Interpersonal Relation Prediction</td><td>Learning Social Relation Traits from Face Images</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=from facial expression recognition to interpersonal relation prediction&sort=relevance">[s2]</a></td><td>Chinese University of Hong 
Kong</td><td>2a171f8d14b6b8735001a11c217af9587d095848</td></tr><tr><td>stanford_drone</td><td>Stanford Drone</td><td>Learning Social Etiquette: Human Trajectory Prediction In Crowded Scenes</td><td>Learning to Predict Human Behavior in Crowded Scenes</td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning social etiquette: human trajectory prediction in crowded scenes&sort=relevance">[s2]</a></td><td></td><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td></tr><tr><td>stickmen_buffy</td><td>Buffy Stickmen</td><td>Learning to Parse Images of Articulated Objects</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance">[s2]</a></td><td></td><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=clustered pose and nonlinear appearance models for human pose estimation&sort=relevance">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td>Learning to Parse Images of Articulated Objects</td><td>Learning to parse images of articulated bodies</td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=learning to parse images of articulated objects&sort=relevance">[s2]</a></td><td></td><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the sun attribute database: beyond categories for deeper scene understanding&sort=relevance">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>sun_attributes</td><td>SUN</td><td>SUN Attribute Database:
+Discovering, Annotating, and Recognizing Scene Attributes</td><td>SUN attribute database: Discovering, annotating, and recognizing scene attributes</td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=sun attribute database:
+discovering, annotating, and recognizing scene attributes&sort=relevance">[s2]</a></td><td></td><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td></tr><tr><td>texas_3dfrd</td><td>Texas 3DFRD</td><td>Texas 3D Face Recognition Database</td><td>Anthropometric 3D Face Recognition</td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=texas 3d face recognition database&sort=relevance">[s2]</a></td><td></td><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td></tr><tr><td>tiny_images</td><td>Tiny Images</td><td>80 million tiny images: a large dataset for non-parametric object and scene recognition</td><td>80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=80 million tiny images: a large dataset for non-parametric object and scene recognition&sort=relevance">[s2]</a></td><td></td><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td></tr><tr><td>umd_faces</td><td>UMD</td><td>The Do's and Don'ts for CNN-based Face Verification</td><td>The Do’s and Don’ts for CNN-Based Face Verification</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the do's and don'ts for cnn-based face verification&sort=relevance">[s2]</a></td><td></td><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td></tr><tr><td>viper</td><td>VIPeR</td><td>Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</td><td>Evaluating Appearance Models for Recognition , Reacquisition , and Tracking</td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=evaluating appearance models for recognition, reacquisition, and tracking&sort=relevance">[s2]</a></td><td></td><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td></tr><tr><td>voc</td><td>VOC</td><td>The PASCAL Visual Object Classes (VOC) Challenge</td><td>The Pascal Visual Object Classes Challenge: A Retrospective</td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=the pascal visual object classes (voc) challenge&sort=relevance">[s2]</a></td><td></td><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td></tr><tr><td>who_goes_there</td><td>WGT</td><td>Who Goes There? Approaches to Mapping Facial Appearance Diversity</td><td>Who goes there?: approaches to mapping facial appearance diversity</td><td><a href="http://doi.acm.org/10.1145/2996913.2996997">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=who goes there? 
approaches to mapping facial appearance diversity&sort=relevance">[s2]</a></td><td></td><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td></tr><tr><td>wildtrack</td><td>WildTrack</td><td>WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td>WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=wildtrack: a multi-camera hd dataset for dense unscripted pedestrian detection&sort=relevance">[s2]</a></td><td></td><td>77c81c13a110a341c140995bedb98101b9e84f7f</td></tr><tr><td>yale_faces</td><td>YaleFaces</td><td>Acquiring Linear Subspaces for Face Recognition under Variable Lighting</td><td>From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=acquiring linear subspaces for face recognition under variable lighting&sort=relevance">[s2]</a></td><td></td><td>18c72175ddbb7d5956d180b65a96005c100f6014</td></tr><tr><td>yfcc_100m</td><td>YFCC100M</td><td>YFCC100M: The New Data in Multimedia Research</td><td>The New Data and New Challenges in Multimedia Research</td><td><span class="gray">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=yfcc100m: the new data in multimedia research&sort=relevance">[s2]</a></td><td></td><td>a6e695ddd07aad719001c0fc1129328452385949</td></tr><tr><td>youtube_makeup</td><td>YMU</td><td>Can Facial Cosmetics Affect the Matching Accuracy of Face Recognition Systems?</td><td>Automatic facial makeup detection with application in face recognition</td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994">[pdf]</a></td><td><a href="https://www.semanticscholar.org/search?q=can facial cosmetics affect the matching accuracy of face recognition systems?&sort=relevance">[s2]</a></td><td>West Virginia University</td><td>fcc6fe6007c322641796cb8792718641856a22a7</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/s2-fetch-pdf.py b/scraper/s2-fetch-pdf.py
index 5477cbd5..30bc5a40 100644
--- a/scraper/s2-fetch-pdf.py
+++ b/scraper/s2-fetch-pdf.py
@@ -31,10 +31,8 @@ def fetch_pdf(paper_id, url):
size = s2.fetch_file(url, pdf_fn)
if size is None:
print("{} empty?".format(paper_id))
- time.sleep(random.randint(5, 10))
return None
print("{} {} kb".format(paper_id, int(size / 1024)))
- time.sleep(random.randint(5, 10))
return
# return paper
diff --git a/scraper/s2-final-report.py b/scraper/s2-final-report.py
new file mode 100644
index 00000000..f81d1835
--- /dev/null
+++ b/scraper/s2-final-report.py
@@ -0,0 +1,209 @@
+import os
+import re
+import glob
+import simplejson as json
+import math
+import operator
+import click
+import subprocess
+from util import *
+
+DIR_PUBLIC_CITATIONS = "../site/datasets/final"
+
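+# CLI entry point: writes one public citations JSON per dataset whose ft_share flag is set.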
+@click.command()
+def s2_final_report():
+ addresses = AddressBook()
+ megapixels = load_megapixels_lookup()
+ ft_lookup = load_ft_lookup()
+ for key, row in megapixels.items():
+ print(key)
+        ft_share = ft_lookup.get(key)
+        if ft_share:
+            process_paper(row, addresses)
+
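+# Aggregates citations across all of a dataset's papers; the first paper becomes the main entry.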
+def process_paper(row, addresses):
+ aggregate_citations = {}
+ papers = []
+ for paper_id in row['paper_ids']:
+        res = process_single_paper(paper_id, row, addresses, aggregate_citations)
+ if res:
+ papers.append(res)
+ if not len(papers):
+ return
+ with open('{}/{}.json'.format(DIR_PUBLIC_CITATIONS, row['key']), 'w') as f:
+ json.dump({
+ 'id': papers[0]['paper_id'],
+ 'paper': papers[0],
+ 'address': papers[0]['address'],
+ 'additional_papers': papers[1:],
+ 'citations': [aggregate_citations[key] for key in aggregate_citations.keys()],
+ }, f)
+
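+# Summarizes one paper and folds its geocoded citations into the shared aggregate_citations dict.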
+def process_single_paper(paper_id, row, addresses, aggregate_citations):
+ res = {
+ 'paper_id': '',
+ 'key': '',
+ 'title': '',
+ # 'journal': '',
+ 'year': '',
+ 'pdf': '',
+ 'address': '',
+ # 'citation_count': 0,
+ # 'citations_geocoded': 0,
+ # 'citations_unknown': 0,
+ # 'citations_empty': 0,
+ # 'citations_pdf': 0,
+ # 'citations_doi': 0,
+ }
+
+ geocoded_citations = []
+ unknown_citations = []
+ empty_citations = []
+ pdf_count = 0
+ doi_count = 0
+ address_count = 0
+
+ fn = file_path('papers', paper_id, 'paper.json')
+
+ with open(fn, 'r') as f:
+ data = json.load(f)
+ print('>> {} {}'.format(data['paperId'], row['key']))
+ paper = load_paper(data['paperId'])
+ if paper is None:
+ print("Paper missing! {}".format(data['paperId']))
+ return
+
+ res['key'] = row['key']
+ res['name'] = row['name']
+ res['paper_id'] = paper.paper_id
+ res['title'] = paper.title
+ # res['journal'] = paper.journal
+ res['year'] = paper.year
+ res['pdf'] = paper.pdf_link
+ # res['authors'] = ', '.join(paper.authors)
+ # res['citations'] = []
+
+ paper_institutions = load_institutions(paper.paper_id)
+ paper_address = None
+ for inst in sorted(paper_institutions, key=operator.itemgetter(1)):
+ # print(inst[1])
+ institution = inst[1]
+ if paper_address is None:
+ paper_address = addresses.findObject(institution)
+
+ if paper_address:
+ # print(paper_address)
+ res['address'] = paper_address
+
+ for cite in data['citations']:
+ citationId = cite['paperId']
+ if citationId in aggregate_citations:
+ continue
+ seen_here = {}
+ citation = load_paper(citationId)
+ has_pdf = os.path.exists(file_path('pdf', citationId, 'paper.txt'))
+ has_doi = os.path.exists(file_path('doi', citationId, 'paper.doi'))
+ if has_pdf:
+ pdf_count += 1
+ if has_doi:
+ doi_count += 1
+ if citation is None or citation.data is None:
+ print("Citation missing! {}".format(cite['paperId']))
+ continue
+ institutions = load_institutions(citationId)
+ geocoded_addresses = []
+ geocoded_institutions = []
+ institution = ''
+ address = None
+ for inst in sorted(institutions, key=operator.itemgetter(1)):
+ address_count += 1
+ institution = inst[1]
+ next_address = addresses.findObject(institution)
+ if next_address and next_address['address'] not in seen_here:
+ seen_here[next_address['address']] = True
+ address = next_address
+ geocoded_addresses.append(next_address)
+ if not address:
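+            # No geocoded institution yet: fall back to headings extracted from the PDF text.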
+ if has_pdf:
+ headings, found_abstract = read_headings(file_path('pdf', citationId, 'paper.txt'), citation)
+ heading_string = '\n'.join(headings[0:20])
+ found_addresses = []
+ if len(headings):
+ for heading in headings:
+ l = heading.lower().strip()
+ if l:
+ next_address = addresses.findObject(l)
+ if next_address and next_address['address'] not in seen_here:
+ seen_here[next_address['address']] = True
+ address = next_address
+ geocoded_addresses.append(next_address)
+ if address:
+ if citationId not in aggregate_citations:
+ aggregate_citations[citationId] = {
+ 'id': citationId,
+ 'title': citation.title,
+ 'addresses': geocoded_addresses,
+ 'year': citation.year,
+ 'pdf': citation.pdf_link,
+ }
+
+ # res['citation_count'] = len(data['citations'])
+ # res['citations_geocoded'] = len(geocoded_citations)
+ # res['citations_unknown'] = len(unknown_citations)
+ # res['citations_empty'] = len(empty_citations)
+ # res['citations_pdf'] = pdf_count
+ # res['citations_doi'] = doi_count
+
+ return res
+
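+# Maps each dataset key to True/False based on the ft_share flag in the 'datasets' Google sheet.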
+def load_ft_lookup():
+ keys, rows = fetch_google_sheet('datasets')
+ lookup = {}
+ for row in rows:
+ rec = {}
+ for index, key in enumerate(keys):
+ rec[key] = row[index]
+ if rec['ft_share'] == '1' or rec['ft_share'] == 1:
+ lookup[rec['key']] = True
+ else:
+ lookup[rec['key']] = False
+ return lookup
+
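+# Groups citation_lookup.csv rows by dataset key, collecting every paper_id for that dataset.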
+def load_megapixels_lookup():
+ keys, rows = read_csv('datasets/citation_lookup.csv')
+ lookup = {}
+ for row in rows:
+ rec = {}
+ for index, key in enumerate(keys):
+ rec[key] = row[index]
+ paper_key = rec['key']
+ if paper_key not in lookup:
+ rec['paper_ids'] = []
+ lookup[paper_key] = rec
+ lookup[paper_key]['paper_ids'].append(rec['paper_id'])
+ # recs.append(rec)
+ return lookup
+
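+# Prefers institutions extracted from the PDF text, falling back to DOI metadata.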
+def load_institutions(paperId):
+ if os.path.exists(file_path('pdf', paperId, 'institutions.json')):
+ return read_json(file_path('pdf', paperId, 'institutions.json'))['institutions']
+ elif os.path.exists(file_path('doi', paperId, 'institutions.json')):
+ return read_json(file_path('doi', paperId, 'institutions.json'))['institutions']
+ else:
+ return []
+
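+# Paper data is sharded on disk by the first two characters of the paper id.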
+def data_path(key, paper_id):
+ return 'datasets/s2/{}/{}/{}'.format(key, paper_id[0:2], paper_id)
+def file_path(key, paper_id, fn):
+ return os.path.join(data_path(key, paper_id), fn)
+
+if __name__ == '__main__':
+ s2_final_report()
diff --git a/scraper/s2-papers.py b/scraper/s2-papers.py
index bf77a734..86e2d614 100644
--- a/scraper/s2-papers.py
+++ b/scraper/s2-papers.py
@@ -5,42 +5,77 @@
import time
import random
import re
-import simplejson as json
+import operator
import click
from s2 import SemanticScholarAPI
from util import *
-'''
-s2 search API format:
-results
-matchedAuthors
-matchedPresentations
-query
-querySuggestions
-results
-stats
-totalPages
-totalResults
-'''
-
s2 = SemanticScholarAPI()
@click.command()
-@click.option('--index', '-n', default=0, help='Index of CSV (query,)')
-@click.option('--depth', '-d', default=1, help='Depth to recurse (not implemented).')
-def fetch_papers(index, depth):
- keys, lines = read_citation_list(index)
+def fetch_papers():
+ addresses = AddressBook()
+ lookup_keys, lines = read_csv('./datasets/citation_lookup.csv')
+ report_keys = [
+ "key", "name", "our title", 'found title', '', '', 'address', 's2 id'
+ ]
+ all_rows = []
+ no_location_rows = []
+ nonmatching_rows = []
for line in lines:
- label = line[0]
- title = re.sub(r'[^-0-9a-zA-Z ]+', '', line[1])
- entry_fn = './datasets/s2/entries/{}.json'.format(title)
- if not os.path.exists(entry_fn):
- print('not found: {}'.format(entry_fn))
- continue
- result = read_json(entry_fn)
- paper_id = result['id']
- paper = fetch_paper(paper_id)
- # get all of the paper's citations
+ key, name, title, paper_id = line
+ paper = fetch_paper(s2, paper_id)
+ db_paper = load_paper(paper_id)
+ pdf_link = db_paper.pdf_link if db_paper else ""
+
+ paper_institutions = load_institutions(paper_id)
+ paper_address = None
+ for inst in sorted(paper_institutions, key=operator.itemgetter(1)):
+ institution = inst[1]
+ paper_address = addresses.findObject(institution)
+ if paper_address is not None:
+ break
+
+ paper_address = paper_address['address'] if paper_address else ""
+
+ s2_link = "https://www.semanticscholar.org/search?q={}&sort=relevance".format(title.strip().lower())
+ row = [
+ key,
+ name,
+ title,
+ paper['title'],
+ LinkLine(pdf_link, '[pdf]'),
+ LinkLine(s2_link, '[s2]'),
+ paper_address,
+ paper['paperId'],
+ ]
+ all_rows.append(row)
+ if title.strip().lower() != paper['title'].strip().lower():
+ nonmatching_rows.append(row)
+ if paper_address == '':
+ no_location_rows.append(row)
+ write_report('./reports/paper_title_report.html', 'Paper Title Sanity Check', report_keys, all_rows)
+ write_report('./reports/paper_title_report_nonmatching.html', 'Paper Titles that do not match', report_keys, nonmatching_rows)
+ write_report('./reports/paper_title_report_no_location.html', 'Papers with no location', report_keys, no_location_rows)
+
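+# Same institution/path helpers as in s2-final-report.py.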
+def load_institutions(paperId):
+ if os.path.exists(file_path('pdf', paperId, 'institutions.json')):
+ return read_json(file_path('pdf', paperId, 'institutions.json'))['institutions']
+ elif os.path.exists(file_path('doi', paperId, 'institutions.json')):
+ return read_json(file_path('doi', paperId, 'institutions.json'))['institutions']
+ else:
+ return []
+
+def data_path(key, paper_id):
+ return 'datasets/s2/{}/{}/{}'.format(key, paper_id[0:2], paper_id)
+
+def file_path(key, paper_id, fn):
+ return os.path.join(data_path(key, paper_id), fn)
+
if __name__ == '__main__':
fetch_papers()
diff --git a/scraper/s2-pdf-first-pages.py b/scraper/s2-pdf-first-pages.py
index 0a6b20bd..6f1d81e3 100644
--- a/scraper/s2-pdf-first-pages.py
+++ b/scraper/s2-pdf-first-pages.py
@@ -30,7 +30,7 @@ def report_first_pages():
write_report('reports/first_pages.html', title='First pages', keys=None, rows=rows)
write_report('reports/institutions.html', title='Institutions', keys=None, rows=sorted(institutions, key=lambda x: x[1]))
write_report('reports/institutions_missing.html', title='Institutions', keys=None, rows=no_institutions)
- write_csv('reports/institution_names.csv', keys=None, rows=[(name,) for name in deduped_institutions])
+ write_csv('reports/institution_names_extracted.csv', keys=None, rows=[(name,) for name in deduped_institutions])
print("{} deduped institutions".format(len(deduped_institutions)))
def dedupe(a):
diff --git a/scraper/s2-search.py b/scraper/s2-search.py
index d9b1beca..77800e32 100644
--- a/scraper/s2-search.py
+++ b/scraper/s2-search.py
@@ -59,23 +59,5 @@ def fetch_entries(index, refresh):
citation_lookup.append([key, name, title, paper_id])
write_csv("datasets/citation_lookup.csv", keys=['key', 'name', 'title', 'paper_id'], rows=citation_lookup)
-def fetch_paper(s2, paper_id):
- os.makedirs('./datasets/s2/papers/{}/{}'.format(paper_id[0:2], paper_id), exist_ok=True)
- paper_fn = './datasets/s2/papers/{}/{}/paper.json'.format(paper_id[0:2], paper_id)
- if os.path.exists(paper_fn):
- return read_json(paper_fn)
- print(paper_id)
- paper = s2.paper(paper_id)
- if paper is None:
- print("Got none paper??")
- # time.sleep(random.randint(1, 2))
- paper = s2.paper(paper_id)
- if paper is None:
- print("Paper not found")
- return None
- write_json(paper_fn, paper)
- # time.sleep(random.randint(1, 2))
- return paper
-
if __name__ == '__main__':
fetch_entries()
diff --git a/scraper/util.py b/scraper/util.py
index 47e5a4aa..7b55afae 100644
--- a/scraper/util.py
+++ b/scraper/util.py
@@ -156,11 +156,16 @@ class DbPaper(object):
return [ (author['ids'][0] if len(author['ids']) else '', author['name']) for author in self.data['authors'] ]
@property
def pdf_link(self):
+ link = None
if self.data['s2PdfUrl']:
- return self.data['s2PdfUrl']
- if len(self.data['pdfUrls']):
- return self.data['pdfUrls'][0]
- return None
+ link = self.data['s2PdfUrl']
+ elif len(self.data['pdfUrls']):
+ link = self.data['pdfUrls'][0]
+ if link is None:
+ return None
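+ # some records store the link as a dict with a 'url' key; normalize to a plain string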
+ if isinstance(link, dict) and 'url' in link:
+ return link['url']
+ return link
def record(self):
return [ self.paper_id, self.title, self.journal, self.year ]
@@ -192,7 +197,10 @@ class RawPaper(object):
@property
def pdf_link(self):
if 'primaryPaperLink' in self.data:
- return self.data['primaryPaperLink']
+ link = self.data['primaryPaperLink']
+ if isinstance(link, dict) and 'url' in link:
+ return link['url']
+ return link
return None
def record(self):
return [ self.paper_id, self.title, self.journal, self.year ]
@@ -283,6 +291,35 @@ class AddressBook (object):
return self.data[index]
return None
+ def findObject(self, address):
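+ # like find(), but returns the matched row as a dict with address, lat, lng, and type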
+ row = self.find(address)
+ if row is not None:
+ return {
+ 'address': row[0],
+ 'lat': row[3],
+ 'lng': row[4],
+ 'type': row[5],
+ }
+ return None
+
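+# Fetch a paper from the Semantic Scholar API, caching the JSON on disk; a None response is retried once before giving up.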
+def fetch_paper(s2, paper_id):
+ os.makedirs('./datasets/s2/papers/{}/{}'.format(paper_id[0:2], paper_id), exist_ok=True)
+ paper_fn = './datasets/s2/papers/{}/{}/paper.json'.format(paper_id[0:2], paper_id)
+ if os.path.exists(paper_fn):
+ return read_json(paper_fn)
+ print('fetching paper {}'.format(paper_id))
+ paper = s2.paper(paper_id)
+ if paper is None:
+ print("Got none paper??")
+ # time.sleep(random.randint(1, 2))
+ paper = s2.paper(paper_id)
+ if paper is None:
+ print("Paper not found")
+ return None
+ write_json(paper_fn, paper)
+ # time.sleep(random.randint(1, 2))
+ return paper
+
def fetch_spreadsheet():
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name('./.creds/Megapixels-ef28f91112a9.json', scope)
diff --git a/site/assets/css/css.css b/site/assets/css/css.css
index e29f2595..7544fd9d 100644
--- a/site/assets/css/css.css
+++ b/site/assets/css/css.css
@@ -76,6 +76,7 @@ header .links {
flex-direction: row;
font-family: 'Roboto Mono', monospace;
}
+header .links span,
header .links a {
display: block;
color: #777;
@@ -84,6 +85,7 @@ header .links a {
margin-right: 32px;
transition: color 0.1s cubic-bezier(0,0,1,1), border-color 0.1s cubic-bezier(0,0,1,1);
border-bottom: 1px solid rgba(255,255,255,0);
+ padding: 3px;
}
header .links a.active {
color: #bbb;
diff --git a/site/assets/css/splash.css b/site/assets/css/splash.css
index 238139a4..9dd5afba 100644
--- a/site/assets/css/splash.css
+++ b/site/assets/css/splash.css
@@ -70,4 +70,65 @@ header .links a.activeLink {
}
.about a {
color: #fff;
-}
\ No newline at end of file
+}
+
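+/* Narrow screens: stack the header and footer links and solidify their backgrounds while the about modal is open. */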
+@media screen and (max-width: 700px) {
+ header, footer {
+ transition: background 0.4s;
+ }
+ .modalOpen header,
+ .modalOpen footer {
+ background: rgba(25, 25, 25, 1.0);
+ }
+ header .slogan {
+ padding-left: 10px;
+ }
+ footer {
+ display: block;
+ background: rgba(25, 25, 25, 0.8);
+ padding: 10px 20px;
+ }
+ footer div {
+ background: transparent;
+ text-align: center;
+ width: 100%;
+ display: block;
+ padding: 0;
+ }
+ header .links {
+ display: block;
+ text-align: right;
+ padding: 0;
+ padding-right: 5px;
+ }
+ header .links span,
+ header .links a {
+ display: inline-block;
+ text-align: right;
+ margin: 0;
+ font-size: 12px;
+ background: rgba(25, 25, 25, 0.5);
+ text-shadow: 0 0 0 rgba(0,0,0,0);
+ border-bottom: 0;
+ }
+ .about {
+ display: block;
+ }
+ .about .inner {
+ max-width: 100%;
+ padding: 20px;
+ padding-top: 80px;
+ padding-bottom: 50px;
+ }
+}
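+/* Short viewports: give the about modal the same full-width block layout. */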
+@media screen and (max-height: 500px) {
+ .about {
+ display: block;
+ }
+ .about .inner {
+ max-width: 100%;
+ padding: 20px;
+ padding-top: 70px;
+ padding-bottom: 50px;
+ }
+}
diff --git a/site/assets/demo/splash/index.html b/site/assets/demo/splash/index.html
index e285c861..32517d97 100644
--- a/site/assets/demo/splash/index.html
+++ b/site/assets/demo/splash/index.html
@@ -18,7 +18,7 @@
<div class='site_name'>MegaPixels</div>
</a>
<div class='links'>
- <a href="#" class='aboutLink'>LAUNCHING MAY 2019</a>
+ <span>LAUNCHING MAY 2019</span>
<a href="#" class='aboutLink activeLink'>ABOUT</a>
</div>
</header>
diff --git a/site/datasets/final/10k_US_adult_faces.json b/site/datasets/final/10k_US_adult_faces.json
new file mode 100644
index 00000000..8bcd8190
--- /dev/null
+++ b/site/datasets/final/10k_US_adult_faces.json
@@ -0,0 +1 @@
+{"id": "8b2dd5c61b23ead5ae5508bb8ce808b5ea266730", "paper": {"paper_id": "8b2dd5c61b23ead5ae5508bb8ce808b5ea266730", "key": "10k_US_adult_faces", "title": "The intrinsic memorability of face photographs.", "year": "2013", "pdf": "https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf", "address": "", "name": "10K US Adult Faces"}, "address": "", "additional_papers": [], "citations": [{"id": "df969647a0ee9ea25b23589f44be5240b5097236", "title": "How robust is familiar face recognition? A repeat detection study of more than 1000 faces", "addresses": [{"address": "Victoria University of Wellington", "lat": "-41.29052775", "lng": "174.76846919", "type": "edu"}, {"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/df96/9647a0ee9ea25b23589f44be5240b5097236.pdf"}, {"id": "a5c8fc1ca4f06a344b53dc81ebc6d87f54896722", "title": "Learning to see people like people", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}, {"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a5c8/fc1ca4f06a344b53dc81ebc6d87f54896722.pdf"}, {"id": "b1429e4d3dd3412e92a37d2f9e0721ea719a9b9e", "title": "Person re-identification using multiple first-person-views on wearable devices", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477676"}, {"id": "2dc7d439e99f15a499cd2dcbdfbc1c0c7648964d", "title": "Computational Understanding of Image Memorability by Zoya Bylinskii", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": "2015", "pdf": "https://pdfs.semanticscholar.org/2dc7/d439e99f15a499cd2dcbdfbc1c0c7648964d.pdf"}, {"id": "19c0069f075b5b2d8ac48ad28a7409179bd08b86", "title": "Modifying the Memorability of Face Photographs", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2013, "pdf": "http://people.csail.mit.edu/torralba/publications/iccv2013_khosla.pdf"}, {"id": "f91388f87e10674226f4def4cda411adc01da496", "title": "Failure to Affect Decision Criteria During Recognition Memory With Continuous Theta Burst Stimulation", "addresses": [{"address": "University of Wollongong", "lat": "-34.40505545", "lng": "150.87834655", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/f913/88f87e10674226f4def4cda411adc01da496.pdf"}, {"id": "850d84e4c73a8f0762c8c798b2b7fd6f2787263a", "title": "The Discovery of Perceptual Structure from Visual Co - occurrences in Space and Time", "addresses": [{"address": "Yale University", "lat": "41.25713055", "lng": "-72.98966960", "type": "edu"}, {"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/850d/84e4c73a8f0762c8c798b2b7fd6f2787263a.pdf"}, {"id": "05a26e093a101a9e6d9cac4e39a29afd6f1ca77e", "title": "Computational modeling of social face perception in humans : Leveraging the active appearance model", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/05a2/6e093a101a9e6d9cac4e39a29afd6f1ca77e.pdf"}, {"id": 
"dbda7c3a09ada41ad45f6dfa1aa803e2a87ddbcd", "title": "From what we perceive to what we remember: Characterizing representational dynamics of visual memorability", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}, {"address": "McGovern Institute for Brain Research", "lat": "42.36262950", "lng": "-71.09144810", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/dbda/7c3a09ada41ad45f6dfa1aa803e2a87ddbcd.pdf"}, {"id": "7566032327a19f9ba770022677de34d7e7aeaac8", "title": "What Makes Natural Scene Memorable?", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1808.08754.pdf"}, {"id": "e607b91f69ea2bff3194b07c5d22b4625bbe306e", "title": "Learning to See People Like People: Predicting Social Impressions of Faces", "addresses": [{"address": "Purdue University", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/e607/b91f69ea2bff3194b07c5d22b4625bbe306e.pdf"}, {"id": "f942739b7f9bc3c0b84f760bb2fd4895e1363ec0", "title": "Students Wearing Police Uniforms Exhibit Biased Attention toward Individuals Wearing Hoodies", "addresses": [{"address": "McMaster University", "lat": "43.26336945", "lng": "-79.91809684", "type": "edu"}, {"address": "Curtin University", "lat": "-32.00686365", "lng": "115.89691775", "type": "edu"}, {"address": "University of Hong Kong", "lat": "22.20814690", "lng": "114.25964115", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f942/739b7f9bc3c0b84f760bb2fd4895e1363ec0.pdf"}, {"id": "2c7946d5d2f1572c20e9843eb2033b8eb9771bf3", "title": "THEORETICAL REVIEW Mechanisms for Widespread Hippocampal Involvement in Cognition", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}, {"address": "Princeton University", "lat": "40.34829285", "lng": "-74.66308325", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/c54d/aaa8c240b2a787eedf6a7aaa1d3fdec8dfd0.pdf"}, {"id": "0c7844b63a05ec086fba231ad9eb3114ffb4139e", "title": "Automated Facial Trait Judgment and Election Outcome Prediction: Social Dimensions of Face", "addresses": [{"address": "University of California, Los Angeles", "lat": "34.06877880", "lng": "-118.44500940", "type": "edu"}, {"address": "Facebook", "lat": "37.39367170", "lng": "-122.08072620", "type": "company"}], "year": 2015, "pdf": "http://www.stat.ucla.edu/~sczhu/papers/Conf_2015/face_trait_ICCV15.pdf"}, {"id": "9f5ce56dd0900368ff6f0bc4a4055e6f4ceb0bc7", "title": "Beauty-in-averageness and its contextual modulations : A Bayesian statistical account", "addresses": [{"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/9f5c/e56dd0900368ff6f0bc4a4055e6f4ceb0bc7.pdf"}, {"id": "2d21e6f8bd9e9f647f3517f51347ad89b4381a7f", "title": "Identifying Individual Facial Expressions by Deconstructing a Neural Network", "addresses": [{"address": "Korea University", "lat": "37.59014110", "lng": "127.03623180", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/2d21/e6f8bd9e9f647f3517f51347ad89b4381a7f.pdf"}, {"id": "f106ff6b2dd497650e7e2096b24a23d620a2306b", "title": "Toward A Deep Understanding of What Makes a Scientific Visualization Memorable", "addresses": [{"address": "Ohio State University", "lat": "40.00471095", "lng": 
"-83.02859368", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1808.00607.pdf"}]} \ No newline at end of file
diff --git a/site/datasets/final/feret.json b/site/datasets/final/feret.json
new file mode 100644
index 00000000..bd7fbb75
--- /dev/null
+++ b/site/datasets/final/feret.json
@@ -0,0 +1 @@
+{"id": "0f0fcf041559703998abf310e56f8a2f90ee6f21", "paper": {"paper_id": "0f0fcf041559703998abf310e56f8a2f90ee6f21", "key": "feret", "title": "The FERET Evaluation Methodology for Face-Recognition Algorithms", "year": 1997, "pdf": "http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf", "address": "", "name": "FERET"}, "address": "", "additional_papers": [{"paper_id": "0f0fcf041559703998abf310e56f8a2f90ee6f21", "key": "feret", "title": "The FERET Evaluation Methodology for Face-Recognition Algorithms", "year": 1997, "pdf": "http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf", "address": "", "name": "FERET"}, {"paper_id": "0f0fcf041559703998abf310e56f8a2f90ee6f21", "key": "feret", "title": "The FERET Evaluation Methodology for Face-Recognition Algorithms", "year": 1997, "pdf": "http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf", "address": "", "name": "FERET"}, {"paper_id": "0f0fcf041559703998abf310e56f8a2f90ee6f21", "key": "feret", "title": "The FERET Evaluation Methodology for Face-Recognition Algorithms", "year": 1997, "pdf": "http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf", "address": "", "name": "FERET"}], "citations": [{"id": "2fd007088a75916d0bf50c493d94f950bf55c5e6", "title": "Projective Representation Learning for Discriminative Face Recognition", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1007/978-981-10-7302-1_1"}, {"id": "3b64efa817fd609d525c7244a0e00f98feacc8b4", "title": "A Comprehensive Survey on Pose-Invariant Face Recognition", "addresses": [{"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}], "year": 2016, "pdf": "http://doi.acm.org/10.1145/2845089"}, {"id": "91e507d2d8375bf474f6ffa87788aa3e742333ce", "title": "Robust Face Recognition Using Probabilistic Facial Trait Code", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/91e5/07d2d8375bf474f6ffa87788aa3e742333ce.pdf"}, {"id": "021469757d626a39639e260492eea7d3e8563820", "title": "3D Face Processing", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": 2004, "pdf": "https://doi.org/10.1007/b116723"}, {"id": "60a006bdfe5b8bf3243404fae8a5f4a9d58fa892", "title": "A reference-based framework for pose invariant face recognition", "addresses": [{"address": "University of North Carolina at Chapel Hill", "lat": "35.91139710", "lng": "-79.05045290", "type": "edu"}], "year": 2015, "pdf": "http://alumni.cs.ucr.edu/~mkafai/papers/Paper_bwild.pdf"}, {"id": "841bf196ee0086c805bd5d1d0bddfadc87e424ec", "title": "Locally Kernel-based Nonlinear Regression for Face Recognition", "addresses": [{"address": "Amirkabir University of Technology", "lat": "35.70451400", "lng": "51.40972058", "type": "edu"}, {"address": "Islamic Azad University", "lat": "34.84529990", "lng": "48.55962120", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/841b/f196ee0086c805bd5d1d0bddfadc87e424ec.pdf"}, {"id": "4cf0c6d3da8e20d6f184a4eaa6865d61680982b8", "title": "Face recognition based on 3D mesh model", "addresses": [{"address": "Hong Kong University of Science and Technology", "lat": "22.33863040", "lng": "114.26203370", "type": "edu"}], "year": 2004, 
"pdf": "http://pdfs.semanticscholar.org/4cf0/c6d3da8e20d6f184a4eaa6865d61680982b8.pdf"}, {"id": "e9e39e31419d9a22790b327bc1d6107fa832bdab", "title": "Face recognition using adaptively weighted patch PZM array from a single exemplar image per person", "addresses": [{"address": "Griffith University", "lat": "-27.55339750", "lng": "153.05336234", "type": "edu"}], "year": 2008, "pdf": "http://pdfs.semanticscholar.org/e9e3/9e31419d9a22790b327bc1d6107fa832bdab.pdf"}, {"id": "272e487dfa32f241b622ac625f42eae783b7d9aa", "title": "Face recognition via semi-supervised discriminant local analysis", "addresses": [{"address": "Multimedia University", "lat": "2.92749755", "lng": "101.64185301", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICSIPA.2015.7412207"}, {"id": "279acfde0286bb76dd7717abebc3c8acf12d2c5f", "title": "Local Gradient Order Pattern for Face Representation and Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2014, "pdf": "http://www.cbsr.ia.ac.cn/users/zlei/papers/ICPR2014/Lei-ICPR-14.pdf"}, {"id": "dc7203d64a985b86f2f44bf064220801ef279382", "title": "Multi-scale local Binary Pattern Histogram for Face Recognition", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/dc72/03d64a985b86f2f44bf064220801ef279382.pdf"}, {"id": "13a994d489c15d440c1238fc1ac37dad06dd928c", "title": "Learning Discriminant Face Descriptor for Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/13a9/94d489c15d440c1238fc1ac37dad06dd928c.pdf"}, {"id": "b53485dbdd2dc5e4f3c7cff26bd8707964bb0503", "title": "Pose-Invariant Face Alignment via CNN-Based Dense 3D Model Fitting", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2017", "pdf": "http://doi.org/10.1007/s11263-017-1012-z"}, {"id": "759a3b3821d9f0e08e0b0a62c8b693230afc3f8d", "title": "Attribute and simile classifiers for face verification", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2009, "pdf": "http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf"}, {"id": "0eac652139f7ab44ff1051584b59f2dc1757f53b", "title": "Efficient Branching Cascaded Regression for Face Alignment under Significant Head Rotation", "addresses": [{"address": "University of Wisconsin Madison", "lat": "43.07982815", "lng": "-89.43066425", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/0eac/652139f7ab44ff1051584b59f2dc1757f53b.pdf"}, {"id": "4a2ba5d7b41ae1d8334c5b8bb1e76ce29e4367ee", "title": "Relational divergence based classification on Riemannian manifolds", "addresses": [{"address": "CSIRO, Australia", "lat": "-37.90627370", "lng": "145.13194490", "type": "edu"}, {"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": 2013, "pdf": "http://conradsanderson.id.au/pdfs/alavi_divergence_riemannian_wacv_2013.pdf"}, {"id": "7c7ab59a82b766929defd7146fd039b89d67e984", "title": "Improving multiview face detection with multi-task deep convolutional neural networks", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], 
"year": 2014, "pdf": "https://www.microsoft.com/en-us/research/wp-content/uploads/2016/11/wacv2014_ChaZhang.pdf"}, {"id": "d3b550e587379c481392fb07f2cbbe11728cf7a6", "title": "Small Sample Size Face Recognition using Random Quad-Tree based Ensemble Algorithm", "addresses": [{"address": "Kyoto University", "lat": "35.02749960", "lng": "135.78154513", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/d3b5/50e587379c481392fb07f2cbbe11728cf7a6.pdf"}, {"id": "cfdc4d0f8e1b4b9ced35317d12b4229f2e3311ab", "title": "Quaero at TRECVID 2010: Semantic Indexing", "addresses": [{"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/cfdc/4d0f8e1b4b9ced35317d12b4229f2e3311ab.pdf"}, {"id": "8a3eaaef13bdaee26142fd2784de07e1d24926ca", "title": "Design and evaluation of photometric image quality measures for effective face recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/ea22/0319021e4eaee97c24a7acf1df54ffa3700b.pdf"}, {"id": "d8288322f32ee4501cef5a9b667e5bb79ebd7018", "title": "Facing scalability: Naming faces in an online social network", "addresses": [{"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2012, "pdf": "https://doi.org/10.1016/j.patcog.2011.12.018"}, {"id": "939123cf21dc9189a03671484c734091b240183e", "title": "Within- and cross- database evaluations for face gender classification via befit protocols", "addresses": [{"address": "IDIAP Research Institute", "lat": "46.10923700", "lng": "7.08453549", "type": "edu"}], "year": 2014, "pdf": "http://publications.idiap.ch/downloads/papers/2015/Erdogmus_MMSP_2015.pdf"}, {"id": "ae1de0359f4ed53918824271c888b7b36b8a5d41", "title": "Low-cost Automatic Inpainting for Artifact Suppression in Facial Images", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/ae1d/e0359f4ed53918824271c888b7b36b8a5d41.pdf"}, {"id": "2020e8c0be8fa00d773fd99b6da55029a6a83e3d", "title": "An Evaluation of the Invariance Properties of a Biologically-Inspired System for Unconstrained Face Recognition", "addresses": [{"address": "MIT", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/9ca3/806dd01f8aded02e88c7022716b7fef46423.pdf"}, {"id": "9fc993aeb0a007ccfaca369a9a8c0ccf7697261d", "title": "Context-Aware Local Binary Feature Learning for Face Recognition", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7936534"}, {"id": "64a44e1d5cbefbb403811360a88f4d93e569ffbd", "title": "Perspective distortion modeling, learning and compensation", "addresses": [{"address": "University of California, Los Angeles", "lat": "34.06877880", "lng": "-118.44500940", "type": "edu"}], "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2015.7301314"}, {"id": "fb7bf10cbc583db5d5eee945aa633fcb968e01ad", "title": "A novel weighted fuzzy LDA for face recognition using the genetic algorithm", "addresses": [{"address": "Curtin University", "lat": "-32.00686365", "lng": "115.89691775", "type": "edu"}], "year": 2012, "pdf": 
"https://doi.org/10.1007/s00521-012-0962-x"}, {"id": "e69ac130e3c7267cce5e1e3d9508ff76eb0e0eef", "title": "Addressing the illumination challenge in two-dimensional face recognition: a survey", "addresses": [{"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/e69a/c130e3c7267cce5e1e3d9508ff76eb0e0eef.pdf"}, {"id": "7b47dd9302b3085cd6705614b88d7bdbc8ae5c13", "title": "Face Recognition Using Gabor-Based Feature Extraction and Feature Space Transformation Fusion Method for Single Image per Person Problem", "addresses": [{"address": "Jiangnan University", "lat": "31.48542550", "lng": "120.27395810", "type": "edu"}, {"address": "University of Pennsylvania", "lat": "39.94923440", "lng": "-75.19198985", "type": "edu"}], "year": "2017", "pdf": "http://doi.org/10.1007/s11063-017-9693-4"}, {"id": "9cda3e56cec21bd8f91f7acfcefc04ac10973966", "title": "Periocular biometrics: databases, algorithms and directions", "addresses": [{"address": "Halmstad University", "lat": "56.66340325", "lng": "12.87929727", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/IWBF.2016.7449688"}, {"id": "35d42f4e7a1d898bc8e2d052c38e1106f3e80188", "title": "Human and algorithm performance on the PaSC face Recognition Challenge", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}, {"address": "University of Texas at Dallas", "lat": "32.98207990", "lng": "-96.75662780", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/BTAS.2015.7358765"}, {"id": "82ccd62f70e669ec770daf11d9611cab0a13047e", "title": "Sparse Variation Pattern for Texture Classification", "addresses": [{"address": "Azad University", "lat": "36.31734320", "lng": "50.03672860", "type": "edu"}, {"address": "Tafresh University", "lat": "34.68092465", "lng": "50.05341352", "type": "edu"}, {"address": "University of Western Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}], "year": 2013, "pdf": "http://www.csse.uwa.edu.au/~ajmal/papers/Farshid_DICTA2013.pdf"}, {"id": "016a8ed8f6ba49bc669dbd44de4ff31a79963078", "title": "Face relighting for face recognition under generic illumination", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}], "year": 2004, "pdf": "https://doi.org/10.1109/ICASSP.2004.1327215"}, {"id": "2c17d36bab56083293456fe14ceff5497cc97d75", "title": "Unconstrained Face Alignment via Cascaded Compositional Learning", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": 2016, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhu_Unconstrained_Face_Alignment_CVPR_2016_paper.pdf"}, {"id": "6bcee7dba5ed67b3f9926d2ae49f9a54dee64643", "title": "Assessment of Time Dependency in Face Recognition: An Initial Study", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/6bce/e7dba5ed67b3f9926d2ae49f9a54dee64643.pdf"}, {"id": "d9e66b877b277d73f8876f537206395e71f58269", "title": "Learning Stacked Image Descriptor for Face Recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", 
"type": "edu"}], "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7225130"}, {"id": "acc550d31b50c8d95794dc35dd1e271f979a0854", "title": "Optimized Kernel-based Projection Space of Riemannian Manifolds", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}, {"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1602.03570.pdf"}, {"id": "d12bea587989fc78b47584470fd8f689b6ab81d2", "title": "Robust Face Representation Using Hybrid Spatial Feature Interdependence Matrix", "addresses": [{"address": "Intel Laboratory China, Beijing, China", "lat": "39.90419990", "lng": "116.40739630", "type": "company"}], "year": 2013, "pdf": "https://doi.org/10.1109/TIP.2013.2246523"}, {"id": "297d3df0cf84d24f7efea44f87c090c7d9be4bed", "title": "Appearance-Based 3-D Face Recognition from Video", "addresses": [{"address": "University of Maryland College Park", "lat": "38.99203005", "lng": "-76.94610290", "type": "edu"}, {"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/297d/3df0cf84d24f7efea44f87c090c7d9be4bed.pdf"}, {"id": "c207fd762728f3da4cddcfcf8bf19669809ab284", "title": "Face Alignment Using Boosting and Evolutionary Search", "addresses": [{"address": "Southeast University", "lat": "32.05752790", "lng": "118.78682252", "type": "edu"}, {"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/c207/fd762728f3da4cddcfcf8bf19669809ab284.pdf"}, {"id": "916498961a51f56a592c3551b0acc25978571fa7", "title": "Optimal landmark detection using shape models and branch and bound", "addresses": [{"address": "University of Basel", "lat": "47.56126510", "lng": "7.57529610", "type": "edu"}], "year": 2011, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCV.2011.6126275"}, {"id": "928ccc8c4ae415202d187a229009dd48e57871ba", "title": "Winner-Take-All Multiple Category Boosting for Multi-view Face Detection", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/d457/279c1ef4920c1f359783a5eb671eff3ab625.pdf"}, {"id": "407806f5fe3c5ecc2dc15b75d3d2b0359b4ee7e0", "title": "Enhanced independent spectral histogram representations in face recognition", "addresses": [{"address": "Multimedia University", "lat": "2.92749755", "lng": "101.64185301", "type": "edu"}, {"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": "2017", "pdf": "http://doi.org/10.1007/s11042-017-5028-8"}, {"id": "3ac3a714042d3ebc159546c26321a1f8f4f5f80c", "title": "Clustering lightened deep representation for large scale face identification", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2017", "pdf": "http://dl.acm.org/citation.cfm?id=3025149"}, {"id": "afe9cfba90d4b1dbd7db1cf60faf91f24d12b286", "title": "Principal Directions of Synthetic Exact Filters for Robust Real-Time Eye Localization", "addresses": [{"address": "University of Ljubljana", "lat": "46.05015580", "lng": "14.46907327", "type": "edu"}], "year": 2011, "pdf": 
"http://pdfs.semanticscholar.org/afe9/cfba90d4b1dbd7db1cf60faf91f24d12b286.pdf"}, {"id": "e506cdb250eba5e70c5147eb477fbd069714765b", "title": "Heterogeneous Face Recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2012", "pdf": "https://pdfs.semanticscholar.org/e506/cdb250eba5e70c5147eb477fbd069714765b.pdf"}, {"id": "bd5c222323d6b46ea71f329cafe11d38533f6f3a", "title": "Repetition Suppression and Memory for Faces is Reduced in Adults with Autism Spectrum Conditions", "addresses": [{"address": "Cardiff University", "lat": "51.48799610", "lng": "-3.17969747", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/2f67/08086c82c58b739a87900fccbd0b9d9911ac.pdf"}, {"id": "07c90e85ac0f74b977babe245dea0f0abcf177e3", "title": "An Image Preprocessing Algorithm for Illumination Invariant Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/07c9/0e85ac0f74b977babe245dea0f0abcf177e3.pdf"}, {"id": "982fcead58be419e4f34df6e806204674a4bc579", "title": "Performance improvement of face recognition algorithms using occluded-region detection", "addresses": [{"address": "Azbil Corporation, Kawana, Japan", "lat": "35.33414870", "lng": "139.49433560", "type": "company"}, {"address": "Tohoku University", "lat": "38.25309450", "lng": "140.87365930", "type": "edu"}], "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6613012"}, {"id": "64fd48fae4d859583c4a031b51ce76ecb5de614c", "title": "Illuminated face normalization technique by using wavelet fusion and local binary patterns", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}, {"address": "Multimedia University", "lat": "2.92749755", "lng": "101.64185301", "type": "edu"}], "year": 2008, "pdf": "https://doi.org/10.1109/ICARCV.2008.4795556"}, {"id": "44d93039eec244083ac7c46577b9446b3a071f3e", "title": "Empirical comparisons of several preprocessing methods for illumination insensitive face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": "2005", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1415571"}, {"id": "65b1760d9b1541241c6c0222cc4ee9df078b593a", "title": "Enhanced Pictorial Structures for Precise Eye Localization Under Uncontrolled Conditions", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/65b1/760d9b1541241c6c0222cc4ee9df078b593a.pdf"}, {"id": "acaa781f353c769ae5f6101aab140f51b2d33cd2", "title": "Recent advances in correlation filter theory and applications", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/acaa/781f353c769ae5f6101aab140f51b2d33cd2.pdf"}, {"id": "100428708e4884300e4c1ac1f84cbb16e7644ccf", "title": "Regularized Shearlet Network for face recognition using single sample per person", "addresses": [{"address": "University of Sfax, Tunisia", "lat": "34.73610660", "lng": "10.74272750", "type": "edu"}, {"address": "University of Houston", "lat": "29.72079020", "lng": "-95.34406271", 
"type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/ICASSP.2014.6853649"}, {"id": "c1cf5dda56c72b65e86f3a678f76644f22212748", "title": "Face Hallucination via Semi-kernel Partial Least Squares", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/c1cf/5dda56c72b65e86f3a678f76644f22212748.pdf"}, {"id": "085ceda1c65caf11762b3452f87660703f914782", "title": "Large-Pose Face Alignment via CNN-Based Dense 3D Model Fitting", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2016, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Jourabloo_Large-Pose_Face_Alignment_CVPR_2016_paper.pdf"}, {"id": "337dd4aaca2c5f9b5d2de8e0e2401b5a8feb9958", "title": "Data-specific Adaptive Threshold for Face Recognition and Authentication", "addresses": [{"address": "Institute of Information Science", "lat": "25.04107280", "lng": "121.61475620", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.11160.pdf"}, {"id": "2afdda6fb85732d830cea242c1ff84497cd5f3cb", "title": "Face image retrieval by using Haar features", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}, {"address": "Tamkang University", "lat": "25.17500615", "lng": "121.45076751", "type": "edu"}], "year": 2008, "pdf": "http://www.iis.sinica.edu.tw/papers/song/11489-F.pdf"}, {"id": "49570b41bd9574bd9c600e24b269d945c645b7bd", "title": "A Framework for Performance Evaluation of Face Recognition Algorithms", "addresses": [{"address": "Arizona State University", "lat": "33.30715065", "lng": "-111.67653157", "type": "edu"}], "year": 2002, "pdf": "http://pdfs.semanticscholar.org/4957/0b41bd9574bd9c600e24b269d945c645b7bd.pdf"}, {"id": "642a386c451e94d9c44134e03052219a7512b9de", "title": "Taking the bite out of automated naming of characters in TV video", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2009", "pdf": "http://doi.org/10.1016/j.imavis.2008.04.018"}, {"id": "bc9003ad368cb79d8a8ac2ad025718da5ea36bc4", "title": "Facial expression recognition with a three-dimensional face model", "addresses": [{"address": "Technical University Munich", "lat": "48.14955455", "lng": "11.56775314", "type": "edu"}], "year": "2011", "pdf": "https://pdfs.semanticscholar.org/bc90/03ad368cb79d8a8ac2ad025718da5ea36bc4.pdf"}, {"id": "fd9feb21b3d1fab470ff82e3f03efce6a0e67a1f", "title": "Deep Verification Learning", "addresses": [{"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/fd9f/eb21b3d1fab470ff82e3f03efce6a0e67a1f.pdf"}, {"id": "0294f992f8dfd8748703f953925f9aee14e1b2a2", "title": "Blur-Robust Face Recognition via Transformation Learning", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/0294/f992f8dfd8748703f953925f9aee14e1b2a2.pdf"}, {"id": "9264b390aa00521f9bd01095ba0ba4b42bf84d7e", "title": "Displacement Template with Divide-&-Conquer Algorithm for Significantly Improving Descriptor Based Face Recognition Approaches", "addresses": [{"address": "Aberystwyth University", "lat": "52.41073580", "lng": "-4.05295501", "type": "edu"}, {"address": "University of Northern 
British Columbia", "lat": "53.89256620", "lng": "-122.81471592", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/9264/b390aa00521f9bd01095ba0ba4b42bf84d7e.pdf"}, {"id": "4813d9332a1f3ef2bf5846e81005895322310bed", "title": "3D Face Recognition", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/4813/d9332a1f3ef2bf5846e81005895322310bed.pdf"}, {"id": "c79cf7f61441195404472102114bcf079a72138a", "title": "Pose-Invariant 2 D Face Recognition by Matching Using Graphical Models", "addresses": [{"address": "University of Surrey", "lat": "51.24303255", "lng": "-0.59001382", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/9704/8d901389535b122f82a6a949bd8f596790f2.pdf"}, {"id": "25c3068e7964d3b894916a82b1fa93c9d6792886", "title": "Face Recognition with Histograms of Oriented Gradients", "addresses": [{"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}], "year": "2010", "pdf": "https://pdfs.semanticscholar.org/25c3/068e7964d3b894916a82b1fa93c9d6792886.pdf"}, {"id": "fc798314994bf94d1cde8d615ba4d5e61b6268b6", "title": "Face Recognition : face in video , age invariance , and facial marks", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/fc79/8314994bf94d1cde8d615ba4d5e61b6268b6.pdf"}, {"id": "c5c1575565e04cd0afc57d7ac7f7a154c573b38f", "title": "Face Refinement through a Gradient Descent Alignment Approach", "addresses": [{"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}], "year": "2006", "pdf": "https://pdfs.semanticscholar.org/010a/f49ddb10c51b7913c2533910dd28ca39411c.pdf"}, {"id": "3cc3cf57326eceb5f20a02aefae17108e8c8ab57", "title": "Benchmark for Evaluating Biological Image Analysis Tools", "addresses": [{"address": "University of California, Santa Barbara", "lat": "34.41459370", "lng": "-119.84581950", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/3cc3/cf57326eceb5f20a02aefae17108e8c8ab57.pdf"}, {"id": "3dfb822e16328e0f98a47209d7ecd242e4211f82", "title": "Cross-Age LFW: A Database for Studying Cross-Age Face Recognition in Unconstrained Environments", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.08197.pdf"}, {"id": "7e6b11674e989d6a86afda241a51f7fa3790b93e", "title": "Optimized Kernel-based Projection Space of Riemannian Manifolds", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}, {"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/acc5/50d31b50c8d95794dc35dd1e271f979a0854.pdf"}, {"id": "588bed36b3cc9e2f26c39b5d99d6687f36ae1177", "title": "Sparsely Encoded Local Descriptor for face recognition", "addresses": [{"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}, {"address": "Chinese Academy of Science", "lat": "39.90419990", "lng": "116.40739630", "type": "edu"}], "year": "2011", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771389"}, {"id": "ba21fd28003994480f713b0a1276160fea2e89b5", "title": "Identification of Individuals from 
Ears in Real World Conditions", "addresses": [{"address": "University of South Florida", "lat": "28.05999990", "lng": "-82.41383619", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/ba21/fd28003994480f713b0a1276160fea2e89b5.pdf"}, {"id": "4c842fbd4c032dd4d931eb6ff1eaa2a13450b7af", "title": "A review of recent advances in visual speech decoding", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1016/j.imavis.2014.06.004"}, {"id": "46e866f58419ff4259c65e8256c1d4f14927b2c6", "title": "On the Generalization Power of Face and Gait Gender Recognition Methods", "addresses": [{"address": "University of Warwick", "lat": "52.37931310", "lng": "-1.56042520", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/f03d/cfd956cf4404ec9f0c7fb451479d72a63e03.pdf"}, {"id": "dfe823d9851d222f299ad26283c7de4b4a3941e8", "title": "Kernel Fisher Discriminant Analysis in Full Eigenspace", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}], "year": 2007, "pdf": "http://pdfs.semanticscholar.org/dfe8/23d9851d222f299ad26283c7de4b4a3941e8.pdf"}, {"id": "2cd7821fcf5fae53a185624f7eeda007434ae037", "title": "Exploring the geo-dependence of human face appearance", "addresses": [{"address": "University of Kentucky", "lat": "38.03337420", "lng": "-84.50177580", "type": "edu"}], "year": 2014, "pdf": "http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf"}, {"id": "4d15254f6f31356963cc70319ce416d28d8924a3", "title": "Quo vadis Face Recognition?", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Robotics Institute", "lat": "13.65450525", "lng": "100.49423171", "type": "edu"}, {"address": "University of Pittsburgh", "lat": "40.44415295", "lng": "-79.96243993", "type": "edu"}], "year": 2001, "pdf": "http://pdfs.semanticscholar.org/4d15/254f6f31356963cc70319ce416d28d8924a3.pdf"}, {"id": "1afef6b389bd727c566cd6fbcd99adefe4c0cf32", "title": "Towards resolution invariant face recognition in uncontrolled scenarios", "addresses": [{"address": "Sichuan University, Chengdu", "lat": "30.64276900", "lng": "104.06751175", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICB.2016.7550087"}, {"id": "38e7f3fe450b126367ec358be9b4cc04e82fa8c7", "title": "Maximal Likelihood Correspondence Estimation for Face Recognition Across Pose", "addresses": [{"address": "OMRON Corporation, Kyoto, Japan", "lat": "35.01163630", "lng": "135.76802940", "type": "company"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/TIP.2014.2351265"}, {"id": "90ea3a35e946af97372c3f32a170b179fe8352aa", "title": "Discriminant Learning for Face Recognition", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/90ea/3a35e946af97372c3f32a170b179fe8352aa.pdf"}, {"id": "badcfb7d4e2ef0d3e332a19a3f93d59b4f85668e", "title": "The Application of Extended Geodesic Distance in Head Poses Estimation", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2006, "pdf": 
"http://pdfs.semanticscholar.org/badc/fb7d4e2ef0d3e332a19a3f93d59b4f85668e.pdf"}, {"id": "373813010983b274401b9b65157df57ce50f7011", "title": "Focus on quality, predicting FRVT 2006 performance", "addresses": [{"address": "Colorado State University", "lat": "40.57093580", "lng": "-105.08655256", "type": "edu"}, {"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2008, "pdf": "http://www.cs.colostate.edu/~draper/papers/beveridge_fg08.pdf"}, {"id": "eb240521d008d582af37f0497f12c51f4bab16c8", "title": "Statistical Richness of Visual Phase Information: Update on Recognizing Persons by Iris Patterns", "addresses": [{"address": "University of Cambridge", "lat": "52.17638955", "lng": "0.14308882", "type": "edu"}], "year": 2001, "pdf": "https://doi.org/10.1023/A:1012365806338"}, {"id": "0aae88cf63090ea5b2c80cd014ef4837bcbaadd8", "title": "3D Face Structure Extraction from Images at Arbitrary Poses and under Arbitrary Illumination Conditions", "addresses": [{"address": "Drexel University", "lat": "39.95740000", "lng": "-75.19026706", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/0aae/88cf63090ea5b2c80cd014ef4837bcbaadd8.pdf"}, {"id": "35b9f09ed66955765dc7703e9cada605948c71d0", "title": "Similarity Measure Using Local Phase Features and Its Application to Biometric Recognition", "addresses": [{"address": "Tohoku University", "lat": "38.25309450", "lng": "140.87365930", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2013/W02/papers/Aoyama_Similarity_Measure_Using_2013_CVPR_paper.pdf"}, {"id": "3f5741b49573122d278d1bff416ec34e1067a75a", "title": "A systemic approach to automatic metadata extraction from multimedia content", "addresses": [{"address": "National Technical University of Athens", "lat": "37.98782705", "lng": "23.73179733", "type": "edu"}, {"address": "University of Lincoln", "lat": "53.22853665", "lng": "-0.54873472", "type": "edu"}], "year": 2016, "pdf": "http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_SSCI_2016/pdf/SSCI16_paper_271.pdf"}, {"id": "e0dc6f1b740479098c1d397a7bc0962991b5e294", "title": "Face Detection: a Survey", "addresses": [{"address": "Beijing University of Technology", "lat": "39.87391435", "lng": "116.47722285", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2004, "pdf": "http://pdfs.semanticscholar.org/e0dc/6f1b740479098c1d397a7bc0962991b5e294.pdf"}, {"id": "b1e218046a28d10ec0be3272809608dea378eddc", "title": "Overview of the Multiple Biometrics Grand Challenge", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": "2009", "pdf": "https://pdfs.semanticscholar.org/12c5/66e2eee7bbaf45b894e7282f87f00f1db20a.pdf"}, {"id": "d2f2b10a8f29165d815e652f8d44955a12d057e6", "title": "Multiscale binarised statistical image features for symmetric face matching using multiple descriptor fusion based on class-specific LDA", "addresses": [{"address": "Urmia University", "lat": "37.52914535", "lng": "45.04886077", "type": "edu"}], "year": "2015", "pdf": "http://doi.org/10.1007/s10044-015-0475-1"}, {"id": "555f75077a02f33a05841f9b63a1388ec5fbcba5", "title": "A Survey on Periocular Biometrics Research", "addresses": [{"address": "Halmstad University", "lat": "56.66340325", "lng": "12.87929727", "type": "edu"}], "year": "2016", "pdf": 
"https://arxiv.org/pdf/1810.03360.pdf"}, {"id": "db3acf0653d6e69887d184c7ebb1958f74a4d0b1", "title": "Weighting Deep and Classic Representation via l 2 Regularization for Image Classification", "addresses": [{"address": "University of Macau", "lat": "22.12401870", "lng": "113.54510901", "type": "edu"}, {"address": "University of Southampton", "lat": "50.89273635", "lng": "-1.39464295", "type": "edu"}, {"address": "Jiangsu University", "lat": "32.20302965", "lng": "119.50968362", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/db3a/cf0653d6e69887d184c7ebb1958f74a4d0b1.pdf"}, {"id": "a5766dd5f2efe0b44879799dd5499edfb6b44839", "title": "Illumination Quality Assessment for Face Images: A Benchmark and a Convolutional Neural Networks Based Model", "addresses": [{"address": "Tongji University", "lat": "31.28473925", "lng": "121.49694909", "type": "edu"}, {"address": "Hong Kong Polytechnic University", "lat": "22.30457200", "lng": "114.17976285", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/a576/6dd5f2efe0b44879799dd5499edfb6b44839.pdf"}, {"id": "ad784332cc37720f03df1c576e442c9c828a587a", "title": "Face recognition based on face-specific subspace", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/ad78/4332cc37720f03df1c576e442c9c828a587a.pdf"}, {"id": "544c06584c95bfdcafbd62e04fb796e575981476", "title": "Human Identification from Body Shape", "addresses": [{"address": "National Institute of Standards and Technology", "lat": "39.12549380", "lng": "-77.22293475", "type": "edu"}], "year": 2003, "pdf": "http://pdfs.semanticscholar.org/544c/06584c95bfdcafbd62e04fb796e575981476.pdf"}, {"id": "06c956d4aac65752672ce4bd5a379f10a7fd6148", "title": "Stacking PCANet +: An Overly Simplified ConvNets Baseline for Face Recognition", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/LSP.2017.2749763"}, {"id": "ac9516a589901f1421e8ce905dd8bc5b689317ca", "title": "A Practical Framework for Executing Complex Queries over Encrypted Multimedia Data", "addresses": [{"address": "University of Texas at Dallas", "lat": "32.98207990", "lng": "-96.75662780", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/ac95/16a589901f1421e8ce905dd8bc5b689317ca.pdf"}, {"id": "1da1299088a6bf28167c58bbd46ca247de41eb3c", "title": "Face identification from a single example image based on Face-Specific Subspace (FSS)", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2002, "pdf": "https://doi.org/10.1109/ICASSP.2002.5745055"}, {"id": "bca39960ba46dc3193defe0b286ee0bea4424041", "title": "A decision-boundary-oriented feature selection method and its application to face recognition", "addresses": [{"address": "Korea Advanced Institute of Science and Technology", "lat": "36.36971910", "lng": "127.36253700", "type": "edu"}], "year": 2009, "pdf": "https://doi.org/10.1016/j.patrec.2009.05.018"}, {"id": "ccd7a6b9f23e983a3fc6a70cc3b9c9673d70bf2c", "title": "Symmetrical Two-Dimensional PCA with Image Measures in Face Recognition", "addresses": [{"address": "University 
of Electronic Science and Technology of China", "lat": "40.01419050", "lng": "-83.03091430", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/ccd7/a6b9f23e983a3fc6a70cc3b9c9673d70bf2c.pdf"}, {"id": "3c18fb8ff0f5003fefa8e9dc9bebaf88908d255c", "title": "Is block matching an alternative tool to LBP for face recognition?", "addresses": [{"address": "KTH Royal Institute of Technology, Stockholm", "lat": "59.34986645", "lng": "18.07063213", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/ICIP.2014.7025145"}, {"id": "0a2ddf88bd1a6c093aad87a8c7f4150bfcf27112", "title": "Patch-based models for visual object classes", "addresses": [{"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/0a2d/df88bd1a6c093aad87a8c7f4150bfcf27112.pdf"}, {"id": "919bdc161485615d5ee571b1585c1eb0539822c8", "title": "A ranking model for face alignment with Pseudo Census Transform", "addresses": [{"address": "Karlsruhe Institute of Technology", "lat": "49.10184375", "lng": "8.43312560", "type": "edu"}], "year": 2012, "pdf": "http://ieeexplore.ieee.org/document/6460332/"}, {"id": "c6bceb0eb8aded28edbe2607ecbe2f5ee2b57bdc", "title": "Random projections on manifolds of Symmetric Positive Definite matrices for image classification", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}, {"address": "Queensland University of Technology", "lat": "-27.47715625", "lng": "153.02841004", "type": "edu"}], "year": 2014, "pdf": "http://doi.ieeecomputersociety.org/10.1109/WACV.2014.6836085"}, {"id": "0fae5d9d2764a8d6ea691b9835d497dd680bbccd", "title": "Face Recognition using Canonical Correlation Analysis", "addresses": [{"address": "Indian Institute of Technology Delhi", "lat": "28.54632595", "lng": "77.27325504", "type": "edu"}], "year": 2006, "pdf": "http://pdfs.semanticscholar.org/0fae/5d9d2764a8d6ea691b9835d497dd680bbccd.pdf"}, {"id": "bc6a7390135bf127b93b90a21b1fdebbfb56ad30", "title": "Bimodal Vein Data Mining via Cross-Selected-Domain Knowledge Transfer", "addresses": [{"address": "China University of Mining and Technology", "lat": "34.21525380", "lng": "117.13985410", "type": "edu"}, {"address": "East China Normal University", "lat": "31.22849230", "lng": "121.40211389", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/TIFS.2017.2766039"}, {"id": "8a3bb63925ac2cdf7f9ecf43f71d65e210416e17", "title": "ShearFace: Efficient Extraction of Anisotropic Features for Face Recognition", "addresses": [{"address": "University of Sfax, Tunisia", "lat": "34.73610660", "lng": "10.74272750", "type": "edu"}], "year": 2014, "pdf": "https://www.math.uh.edu/~dlabate/ShearFace_ICPR2014.pdf"}, {"id": "9039b8097a78f460db9718bc961fdc7d89784092", "title": "3D Face Recognition Based on Local Shape Patterns and Sparse Representation Classifier", "addresses": [{"address": "Beihang University", "lat": "39.98083330", "lng": "116.34101249", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/9039/b8097a78f460db9718bc961fdc7d89784092.pdf"}]} \ No newline at end of file
diff --git a/site/datasets/final/ijb_c.json b/site/datasets/final/ijb_c.json
new file mode 100644
index 00000000..b8594b2a
--- /dev/null
+++ b/site/datasets/final/ijb_c.json
@@ -0,0 +1 @@
+{"id": "57178b36c21fd7f4529ac6748614bb3374714e91", "paper": {"paper_id": "57178b36c21fd7f4529ac6748614bb3374714e91", "key": "ijb_c", "title": "IARPA Janus Benchmark - C: Face Dataset and Protocol", "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217", "address": "", "name": "IJB-C"}, "address": "", "additional_papers": [], "citations": [{"id": "fca9ebaa30d69ccec8bb577c31693c936c869e72", "title": "Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.00338.pdf"}, {"id": "450c6a57f19f5aa45626bb08d7d5d6acdb863b4b", "title": "Towards Interpretable Face Recognition", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.00611.pdf"}, {"id": "eb027969f9310e0ae941e2adee2d42cdf07d938c", "title": "VGGFace2: A Dataset for Recognising Faces across Pose and Age", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1710.08092.pdf"}]} \ No newline at end of file
diff --git a/site/datasets/final/images_of_groups.json b/site/datasets/final/images_of_groups.json
new file mode 100644
index 00000000..e1735fea
--- /dev/null
+++ b/site/datasets/final/images_of_groups.json
@@ -0,0 +1 @@
+{"id": "21d9d0deed16f0ad62a4865e9acf0686f4f15492", "paper": {"paper_id": "21d9d0deed16f0ad62a4865e9acf0686f4f15492", "key": "images_of_groups", "title": "Understanding images of groups of people", "year": 2009, "pdf": "http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf", "address": "", "name": "Images of Groups"}, "address": "", "additional_papers": [], "citations": [{"id": "49e2c1bae80e6b75233348102dc44671ee52b548", "title": "Age and gender recognition using informative features of various types", "addresses": [{"address": "University of Ottawa", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu"}], "year": 2014, "pdf": "http://www.site.uottawa.ca/~laganier/publications/esmaeelICIP2014.pdf"}, {"id": "0235b2d2ae306b7755483ac4f564044f46387648", "title": "Recognition of Facial Attributes Using Adaptive Sparse Representations of Random Patches", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/0235/b2d2ae306b7755483ac4f564044f46387648.pdf"}, {"id": "27a299b834a18e45d73e0bf784bbb5b304c197b3", "title": "Social Role Discovery in Human Events", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2013, "pdf": "http://ai.stanford.edu/~vigneshr/cvpr_13/cvpr13_social_roles.pdf"}, {"id": "d84230a2fc9950fccfd37f0291d65e634b5ffc32", "title": "Historical and Modern Image-to-Image Translation with Generative Adversarial Networks", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d842/30a2fc9950fccfd37f0291d65e634b5ffc32.pdf"}, {"id": "046865a5f822346c77e2865668ec014ec3282033", "title": "Discovering informative social subgraphs and predicting pairwise relationships from group photos", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}], "year": 2012, "pdf": "http://www.csie.ntu.edu.tw/~winston/papers/chen12discovering.pdf"}, {"id": "0aa303109a3402aa5a203877847d549c4a24d933", "title": "Who Do I Look Like? 
Determining Parent-Offspring Resemblance via Gated Autoencoders", "addresses": [{"address": "University of Central Florida", "lat": "28.59899755", "lng": "-81.19712501", "type": "edu"}], "year": 2014, "pdf": "http://crcv-web.eecs.ucf.edu/papers/cvpr2014/Resemblance_CVPR14.pdf"}, {"id": "c6096986b4d6c374ab2d20031e026b581e7bf7e9", "title": "A Framework for Using Context to Understand Images of People", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2009, "pdf": "http://pdfs.semanticscholar.org/c609/6986b4d6c374ab2d20031e026b581e7bf7e9.pdf"}, {"id": "6aaa77e241fe55ae0c4ad281e27886ea778f9e23", "title": "F-Formation Detection: Individuating Free-Standing Conversational Groups in Images", "addresses": [{"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/b562/ad2ae12920cb318c5309a35000b4d5eb27b8.pdf"}, {"id": "ffe4bb47ec15f768e1744bdf530d5796ba56cfc1", "title": "AFIF4: Deep Gender Classification based on AdaBoost-based Fusion of Isolated Facial Features and Foggy Faces", "addresses": [{"address": "York University", "lat": "43.77439110", "lng": "-79.50481085", "type": "edu"}, {"address": "Assiut University", "lat": "27.18794105", "lng": "31.17009498", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1706.04277.pdf"}, {"id": "14e9158daf17985ccbb15c9cd31cf457e5551990", "title": "ConvNets with Smooth Adaptive Activation Functions for Regression", "addresses": [{"address": "Stony Brook University", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu"}, {"address": "Stony Brook University Hospital", "lat": "40.90826665", "lng": "-73.11520891", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/14e9/158daf17985ccbb15c9cd31cf457e5551990.pdf"}, {"id": "b161d261fabb507803a9e5834571d56a3b87d147", "title": "Gender recognition from face images using a geometric descriptor", "addresses": [{"address": "University of Campinas (UNICAMP)", "lat": "-22.81483740", "lng": "-47.06477080", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8122913"}, {"id": "1190cba0cae3c8bb81bf80d6a0a83ae8c41240bc", "title": "Squared Earth Mover \u2019 s Distance Loss for Training Deep Neural Networks on Ordered-Classes", "addresses": [{"address": "Stony Brook University", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/1190/cba0cae3c8bb81bf80d6a0a83ae8c41240bc.pdf"}, {"id": "30cc1ddd7a9b4878cca7783a59086bdc49dc4044", "title": "Intensity contrast masks for gender classification", "addresses": [{"address": "National Taipei University", "lat": "24.94314825", "lng": "121.36862979", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1007/s11042-015-2599-0"}, {"id": "49e541e0bbc7a082e5c952fc70716e66e5713080", "title": "Group expression intensity estimation in videos via Gaussian Processes", "addresses": [{"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}], "year": 2012, "pdf": "http://ieeexplore.ieee.org/document/6460925/"}, {"id": "8bed7ff2f75d956652320270eaf331e1f73efb35", "title": "Emotion recognition in the wild using deep neural networks and Bayesian classifiers", "addresses": [{"address": "Plymouth University", "lat": "50.37552690", "lng": "-4.13937687", "type": "edu"}, {"address": "University of Calabria", "lat": "39.36502160", "lng": 
"16.22571770", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1709.03820.pdf"}, {"id": "0be8b12f194fb604be69c139a195799e8ab53fd3", "title": "Talking Heads: Detecting Humans and Recognizing Their Interactions", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": 2014, "pdf": "http://www.robots.ox.ac.uk/~vgg/publications/2014/Hoai14/poster.pdf"}, {"id": "0d3068b352c3733c9e1cc75e449bf7df1f7b10a4", "title": "Context Based Facial Expression Analysis in the Wild", "addresses": [{"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}], "year": 2013, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ACII.2013.111"}, {"id": "16820ccfb626dcdc893cc7735784aed9f63cbb70", "title": "Real-time embedded age and gender classification in unconstrained video", "addresses": [{"address": "University of Ottawa", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W12/papers/Azarmehr_Real-Time_Embedded_Age_2015_CVPR_paper.pdf"}, {"id": "fcc82154067dfe778423c2df4ed69f0bec6e1534", "title": "Automatic Analysis of Affect and Membership in Group Settings", "addresses": [{"address": "Queen Mary University of London", "lat": "51.52472720", "lng": "-0.03931035", "type": "edu"}, {"address": "University of Cambridge", "lat": "52.17638955", "lng": "0.14308882", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/fcc8/2154067dfe778423c2df4ed69f0bec6e1534.pdf"}, {"id": "45513d0f2f5c0dac5b61f9ff76c7e46cce62f402", "title": "Face Discovery with Social Context", "addresses": [{"address": "University of Texas at Austin", "lat": "30.28415100", "lng": "-97.73195598", "type": "edu"}], "year": 2011, "pdf": "http://pdfs.semanticscholar.org/4551/3d0f2f5c0dac5b61f9ff76c7e46cce62f402.pdf"}, {"id": "282503fa0285240ef42b5b4c74ae0590fe169211", "title": "Feeding Hand-Crafted Features for Enhancing the Performance of Convolutional Neural Networks", "addresses": [{"address": "Seoul National University", "lat": "37.26728000", "lng": "126.98411510", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/2825/03fa0285240ef42b5b4c74ae0590fe169211.pdf"}, {"id": "1ab881ec87167af9071b2ad8ff6d4ce3eee38477", "title": "Finding Happiest Moments in a Social Context", "addresses": [{"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}, {"address": "University of Canberra", "lat": "-35.23656905", "lng": "149.08446994", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/1ab8/81ec87167af9071b2ad8ff6d4ce3eee38477.pdf"}, {"id": "572dbaee6648eefa4c9de9b42551204b985ff863", "title": "The more the merrier: Analysing the affect of a group of people in images", "addresses": [{"address": "University of Canberra", "lat": "-35.23656905", "lng": "149.08446994", "type": "edu"}, {"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}, {"address": "University of Trento", "lat": "46.06588360", "lng": "11.11598940", "type": "edu"}], "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7163151"}, {"id": "dbf6d2619bd41ce4c36488e15d114a2da31b51c9", "title": "Data-Driven Modeling of Group Entitativity in Virtual Environments", "addresses": [{"address": "University of North Carolina", "lat": "35.90503535", "lng": "-79.04775327", "type": "edu"}, {"address": 
"University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.00028.pdf"}, {"id": "b593f13f974cf444a5781bbd487e1c69e056a1f7", "title": "Query Image Query Image Retrievals Retrievals Transferred Poses Transferred Poses", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/b593/f13f974cf444a5781bbd487e1c69e056a1f7.pdf"}, {"id": "02cc96ad997102b7c55e177ac876db3b91b4e72c", "title": "MuseumVisitors: A dataset for pedestrian and group detection, gaze estimation and behavior understanding", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}, {"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}], "year": 2015, "pdf": "http://www.micc.unifi.it/wp-content/uploads/2015/12/2015_museum-visitors-dataset.pdf"}, {"id": "1b248ed8e7c9514648cd598960fadf9ab17e7fe8", "title": "From apparent to real age: gender, age, ethnic, makeup, and expression bias analysis in real age estimation", "addresses": [{"address": "University of Tartu", "lat": "58.38131405", "lng": "26.72078081", "type": "edu"}, {"address": "University of Barcelona", "lat": "41.38689130", "lng": "2.16352385", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/1b24/8ed8e7c9514648cd598960fadf9ab17e7fe8.pdf"}, {"id": "54bb25a213944b08298e4e2de54f2ddea890954a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf"}, {"id": "3b092733f428b12f1f920638f868ed1e8663fe57", "title": "On the size of Convolutional Neural Networks and generalization performance", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2016, "pdf": "http://www.math.jhu.edu/~data/RamaPapers/PerformanceBounds.pdf"}, {"id": "3991223b1dc3b87883cec7af97cf56534178f74a", "title": "A unified framework for context assisted face clustering", "addresses": [{"address": "University of California, Irvine", "lat": "33.64319010", "lng": "-117.84016494", "type": "edu"}], "year": 2013, "pdf": "http://doi.acm.org/10.1145/2461466.2461469"}, {"id": "1e516273554d87bbe1902fa0298179c493299035", "title": "Age Classification in Unconstrained Conditions Using LBP Variants", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2012, "pdf": "http://www.ee.oulu.fi/~hadid/Age-ICPR2012.pdf"}, {"id": "fd67d0efbd94c9d8f9d2f0a972edd7320bc7604f", "title": "Real-Time Semantic Clothing Segmentation", "addresses": [{"address": "University of Southampton", "lat": "50.89273635", "lng": "-1.39464295", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/fd67/d0efbd94c9d8f9d2f0a972edd7320bc7604f.pdf"}, {"id": "f2c30594d917ea915028668bc2a481371a72a14d", "title": "Scene Understanding Using Internet Photo Collections", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": 2010, "pdf": 
"http://pdfs.semanticscholar.org/f2c3/0594d917ea915028668bc2a481371a72a14d.pdf"}, {"id": "31f1e711fcf82c855f27396f181bf5e565a2f58d", "title": "Unconstrained Age Estimation with Deep Convolutional Neural Networks", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2015, "pdf": "http://doi.ieeecomputersociety.org/10.1109/ICCVW.2015.54"}, {"id": "2cf3564d7421b661e84251d280d159d4b3ebb336", "title": "Discriminating projections for estimating face age in wild images", "addresses": [{"address": "Oak Ridge National Laboratory", "lat": "35.93006535", "lng": "-84.31240032", "type": "edu"}, {"address": "UNCW, USA", "lat": "34.22398690", "lng": "-77.87013250", "type": "edu"}, {"address": "University of North Carolina at Wilmington", "lat": "34.22498270", "lng": "-77.86907744", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/BTAS.2014.6996287"}, {"id": "500fbe18afd44312738cab91b4689c12b4e0eeee", "title": "ChaLearn looking at people 2015 new competitions: Age estimation and cultural event recognition", "addresses": [{"address": "University of Barcelona", "lat": "41.38689130", "lng": "2.16352385", "type": "edu"}, {"address": "University of Venezia", "lat": "45.43127420", "lng": "12.32653770", "type": "edu"}], "year": 2015, "pdf": "http://www.maia.ub.es/~sergio/linked/ijcnn_age_and_cultural_2015.pdf"}, {"id": "0d57d3d2d04fc96d731cac99a7a8ef79050dac75", "title": "Not Everybody's Special: Using Neighbors in Referring Expressions with Uncertain Attributes", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": 2013, "pdf": "http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/Workshops/4990a269.pdf"}, {"id": "fbc9ba70e36768efff130c7d970ce52810b044ff", "title": "Face-graph matching for classifying groups of people", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}, {"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": "2013", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6738500"}, {"id": "00823e6c0b6f1cf22897b8d0b2596743723ec51c", "title": "Understanding and Comparing Deep Neural Networks for Age and Gender Classification", "addresses": [{"address": "Singapore University of Technology and Design", "lat": "1.34021600", "lng": "103.96508900", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.07689.pdf"}, {"id": "ac9a331327cceda4e23f9873f387c9fd161fad76", "title": "Deep Convolutional Neural Network for Age Estimation based on VGG-Face Model", "addresses": [{"address": "University of Bridgeport", "lat": "41.16648580", "lng": "-73.19205640", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/ac9a/331327cceda4e23f9873f387c9fd161fad76.pdf"}, {"id": "5aad56cfa2bac5d6635df4184047e809f8fecca2", "title": "A visual dictionary attack on Picture Passwords", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": 2013, "pdf": "http://chenlab.ece.cornell.edu/people/Amir/publications/picture_password.pdf"}, {"id": "4793f11fbca4a7dba898b9fff68f70d868e2497c", "title": "Kinship Verification through Transfer Learning", "addresses": [{"address": "SUNY Buffalo", "lat": "42.93362780", "lng": "-78.88394479", "type": "edu"}], "year": 
2011, "pdf": "http://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf"}, {"id": "eddc4989cdb20c8cdfb22e989bdb2cb9031d0439", "title": "Binge Watching: Scaling Affordance Learning from Sitcoms", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1804.03080.pdf"}, {"id": "090e4713bcccff52dcd0c01169591affd2af7e76", "title": "What Do You Do? Occupation Recognition in a Photo via Social Context", "addresses": [{"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Shao_What_Do_You_2013_ICCV_paper.pdf"}, {"id": "4ff4c27e47b0aa80d6383427642bb8ee9d01c0ac", "title": "Deep Convolutional Neural Networks and Support Vector Machines for Gender Recognition", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/SSCI.2015.37"}, {"id": "291f527598c589fb0519f890f1beb2749082ddfd", "title": "Seeing People in Social Context: Recognizing People and Social Relationships", "addresses": [{"address": "University of Illinois, Urbana-Champaign", "lat": "40.11116745", "lng": "-88.22587665", "type": "edu"}], "year": 2010, "pdf": "http://pdfs.semanticscholar.org/3215/ceb94227451a958bcf6b1205c710d17e53f5.pdf"}, {"id": "28d06fd508d6f14cd15f251518b36da17909b79e", "title": "What's in a Name? First Names as Facial Attributes", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}, {"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Chen_Whats_in_a_2013_CVPR_paper.pdf"}, {"id": "34022637860443c052375c45c4f700afcb438cd0", "title": "Automatic Recognition of Emotions and Membership in Group Videos", "addresses": [{"address": "Queen Mary University", "lat": "47.05702220", "lng": "21.92270900", "type": "edu"}, {"address": "University of Cambridge", "lat": "52.17638955", "lng": "0.14308882", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.185"}, {"id": "e3e2c106ccbd668fb9fca851498c662add257036", "title": "Appearance, context and co-occurrence ensembles for identity recognition in personal photo collections", "addresses": [{"address": "University of Colorado at Colorado Springs", "lat": "38.89646790", "lng": "-104.80505940", "type": "edu"}], "year": 2013, "pdf": "http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-et-al-Ensembles.pdf"}, {"id": "8ba67f45fbb1ce47a90df38f21834db37c840079", "title": "People search and activity mining in large-scale community-contributed photos", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}], "year": 2012, "pdf": "http://www.cmlab.csie.ntu.edu.tw/~yanying/paper/dsp006-chen.pdf"}, {"id": "a5219fff98dfe3ec81dee95c4ead69a8e24cc802", "title": "Dual-Glance Model for Deciphering Social Relationships", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "University of Minnesota", "lat": "44.97308605", "lng": "-93.23708813", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.00634.pdf"}, {"id": "1e1d7cbbef67e9e042a3a0a9a1bcefcc4a9adacf", "title": 
"A Multi-level Contextual Model for Person Recognition in Photo Albums", "addresses": [{"address": "Stevens Institute of Technology", "lat": "40.74225200", "lng": "-74.02709490", "type": "edu"}], "year": 2016, "pdf": "http://personal.stevens.edu/~hli18//data/papers/CVPR2016_CameraReady.pdf"}, {"id": "3d24b386d003bee176a942c26336dbe8f427aadd", "title": "Sequential Person Recognition in Photo Albums with a Recurrent Network", "addresses": [{"address": "University of Adelaide", "lat": "-34.91892260", "lng": "138.60423668", "type": "edu"}], "year": 2017, "pdf": "http://arxiv.org/abs/1611.09967"}, {"id": "111ae23b60284927f2545dfc59b0147bb3423792", "title": "Classroom Data Collection and Analysis using Computer Vision", "addresses": [{"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/111a/e23b60284927f2545dfc59b0147bb3423792.pdf"}, {"id": "dfbf941adeea19f5dff4a70a466ddd1b77f3b727", "title": "Models for supervised learning in sequence data", "addresses": [{"address": "Delft University of Technology", "lat": "51.99882735", "lng": "4.37396037", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/dfbf/941adeea19f5dff4a70a466ddd1b77f3b727.pdf"}, {"id": "774cbb45968607a027ae4729077734db000a1ec5", "title": "From Bikers to Surfers: Visual Recognition of Urban Tribes", "addresses": [{"address": "Columbia University", "lat": "40.84198360", "lng": "-73.94368971", "type": "edu"}, {"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf"}, {"id": "5b2bc289b607ca1a0634555158464f28fe68a6d3", "title": "Where's Waldo: Matching people in images of crowds", "addresses": [{"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}, {"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": 2011, "pdf": "http://vision.ics.uci.edu/papers/GargRSS_CVPR_2011/GargRSS_CVPR_2011.pdf"}, {"id": "b185f0a39384ceb3c4923196aeed6d68830a069f", "title": "Describing Clothing by Semantic Attributes", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}, {"address": "Stanford University", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/b185/f0a39384ceb3c4923196aeed6d68830a069f.pdf"}, {"id": "14c37ea85ba8d74d053a34aedd7e484659fd54d4", "title": "Beyond trees: MRF inference via outer-planar decomposition", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": 2010, "pdf": "http://users.ece.cmu.edu/~dbatra/publications/assets/opd_cvpr10.pdf"}, {"id": "8d95317d0e366cecae1dd3f7c1ba69fe3fc4a8e0", "title": "Riesz-based Volume Local Binary Pattern and A Novel Group Expression Model for Group Happiness Intensity Analysis", "addresses": [{"address": "University of Oulu", "lat": "65.05921570", "lng": "25.46632601", "type": "edu"}, {"address": "University of Canberra", "lat": "-35.23656905", "lng": "149.08446994", "type": "edu"}, {"address": "Australian National University", "lat": "-35.27769990", "lng": "149.11852700", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/f7c7/f4494f73f2fe845be3b82ee711bc00be7508.pdf"}, {"id": "65293ecf6a4c5ab037a2afb4a9a1def95e194e5f", "title": 
"Face , Age and Gender Recognition using Local Descriptors", "addresses": [{"address": "University of Ottawa", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu"}], "year": 2014, "pdf": "http://pdfs.semanticscholar.org/6529/3ecf6a4c5ab037a2afb4a9a1def95e194e5f.pdf"}, {"id": "854b1f0581f5d3340f15eb79452363cbf38c04c8", "title": "Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation", "addresses": [{"address": "Kyung Hee University", "lat": "32.85363330", "lng": "-117.20352860", "type": "edu"}, {"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}, {"address": "Institute of Information Technology", "lat": "23.72898990", "lng": "90.39826820", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648"}, {"id": "2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d", "title": "Genealogical face recognition based on UB KinFace database", "addresses": [{"address": "SUNY Buffalo", "lat": "42.93362780", "lng": "-78.88394479", "type": "edu"}], "year": 2011, "pdf": "https://doi.org/10.1109/CVPRW.2011.5981801"}, {"id": "aea50d3414ecb20dc2ba77b0277d0df59bde2c2c", "title": "The #selfiestation: Design and Use of a Kiosk for Taking Selfies in the Enterprise", "addresses": [{"address": "IBM Research, North Carolina", "lat": "35.90422720", "lng": "-78.85565763", "type": "company"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/aea5/0d3414ecb20dc2ba77b0277d0df59bde2c2c.pdf"}, {"id": "0a0d5283439f088c158fcec732e2593bb3cd57ad", "title": "Who Blocks Who: Simultaneous clothing segmentation for grouping images", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2011, "pdf": "http://media.cs.tsinghua.edu.cn/~ahz/papers/whoblockswho_iccv2011_final.pdf"}, {"id": "0ba402af3b8682e2aa89f76bd823ddffdf89fa0a", "title": "Squared Earth Mover's Distance-based Loss for Training Deep Neural Networks", "addresses": [{"address": "Harvard University", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu"}, {"address": "Stony Brook University", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/c0d8/4377168c554cb8e83099bed940091fe49dec.pdf"}, {"id": "b9eb55c2c573e2fffd686b00a39185f0142ef816", "title": "The participation payoff: challenges and opportunities for multimedia access in networked communities", "addresses": [{"address": "University of London", "lat": "51.52176680", "lng": "-0.13019072", "type": "edu"}, {"address": "Delft University of Technology", "lat": "51.99882735", "lng": "4.37396037", "type": "edu"}], "year": 2010, "pdf": "http://elvera.nue.tu-berlin.de/files/1241Ramzan2010.pdf"}, {"id": "cc3ef62b4a7eb6c4e45302deb89df2e547b6efcc", "title": "Creating Picture Legends for Group Photos", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": 2012, "pdf": "http://pdfs.semanticscholar.org/cc3e/f62b4a7eb6c4e45302deb89df2e547b6efcc.pdf"}, {"id": "683f5c838ea2c9c50f3f5c5fa064c00868751733", "title": "3D Visual Proxemics: Recognizing Human Interactions in 3D from a Single Image", "addresses": [{"address": "SRI International", "lat": "37.45857960", "lng": "-122.17560525", "type": "edu"}], "year": 2013, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Chakraborty_3D_Visual_Proxemics_2013_CVPR_paper.pdf"}, {"id": "d00e9a6339e34c613053d3b2c132fccbde547b56", "title": "A 
cascaded convolutional neural network for age estimation of unconstrained faces", "addresses": [{"address": "State University of New Jersey", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791154"}, {"id": "5364e58ba1f4cdfcffb247c2421e8f56a75fad8d", "title": "Facial age estimation through self-paced learning", "addresses": [{"address": "East China Normal University", "lat": "31.22849230", "lng": "121.40211389", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/VCIP.2017.8305113"}, {"id": "c9f588d295437009994ddaabb64fd4e4c499b294", "title": "Predicting Professions through Probabilistic Model under Social Context", "addresses": [{"address": "Northeastern University", "lat": "42.33836680", "lng": "-71.08793524", "type": "edu"}], "year": 2013, "pdf": "http://pdfs.semanticscholar.org/c9f5/88d295437009994ddaabb64fd4e4c499b294.pdf"}]} \ No newline at end of file
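A few coordinates in these hunks appear geocoded to the wrong continent (for example, "University of Electronic Science and Technology of China" carries `lng: "-83.03091430"`, and "Kyung Hee University" `lng: "-117.20352860"`). One possible consistency audit, sketched under the same layout assumption as above and not part of the scraper itself: flag institution names that map to several coordinate pairs, and coordinate pairs shared by several names.

```python
import json
from collections import defaultdict
from pathlib import Path

def audit_geocodes(paths):
    """Flag geocoding inconsistencies across the final dataset JSON files:
    one institution name mapped to several coordinate pairs, or one
    coordinate pair shared by several institution names."""
    by_name = defaultdict(set)
    by_point = defaultdict(set)
    for path in paths:
        data = json.loads(Path(path).read_text())
        for citation in data.get("citations", []):
            for addr in citation.get("addresses", []):
                point = (addr["lat"], addr["lng"])
                by_name[addr["address"]].add(point)
                by_point[point].add(addr["address"])
    for name, points in sorted(by_name.items()):
        if len(points) > 1:
            print(f"multiple points for {name!r}: {sorted(points)}")
    for point, names in sorted(by_point.items()):
        if len(names) > 1:
            print(f"shared point {point}: {sorted(names)}")

if __name__ == "__main__":
    audit_geocodes(sorted(Path("site/datasets/final").glob("*.json")))
```

Anything this flags still needs a manual look; identical coordinates can legitimately recur when two citing papers share an institution.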
diff --git a/site/datasets/final/imdb_wiki.json b/site/datasets/final/imdb_wiki.json
new file mode 100644
index 00000000..009a6815
--- /dev/null
+++ b/site/datasets/final/imdb_wiki.json
@@ -0,0 +1 @@
+{"id": "8355d095d3534ef511a9af68a3b2893339e3f96b", "paper": {"paper_id": "8355d095d3534ef511a9af68a3b2893339e3f96b", "key": "imdb_wiki", "title": "DEX: Deep EXpectation of Apparent Age from a Single Image", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390", "address": "", "name": "IMDB"}, "address": "", "additional_papers": [{"paper_id": "8355d095d3534ef511a9af68a3b2893339e3f96b", "key": "imdb_wiki", "title": "DEX: Deep EXpectation of Apparent Age from a Single Image", "year": "2015", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390", "address": "", "name": "IMDB"}], "citations": [{"id": "56f231fc40424ed9a7c93cbc9f5a99d022e1d242", "title": "Age Estimation Based on a Single Network with Soft Softmax of Aging Modeling", "addresses": [{"address": "Macau University of Science and Technology", "lat": "22.15263985", "lng": "113.56803206", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/d060/f2f3641c6a89ade021eea749414a5c6b443f.pdf"}, {"id": "d5b0e73b584be507198b6665bcddeba92b62e1e5", "title": "Multi-Region Ensemble Convolutional Neural Networks for High-Accuracy Age Estimation", "addresses": [{"address": "University of Dundee", "lat": "56.45796755", "lng": "-2.98214831", "type": "edu"}, {"address": "Macau University of Science and Technology", "lat": "22.15263985", "lng": "113.56803206", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/d5b0/e73b584be507198b6665bcddeba92b62e1e5.pdf"}, {"id": "b20cfbb2348984b4e25b6b9174f3c7b65b6aed9e", "title": "Learning with Ambiguous Label Distribution for Apparent Age Estimation", "addresses": [{"address": "Tampere University of Technology", "lat": "61.44964205", "lng": "23.85877462", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/b20c/fbb2348984b4e25b6b9174f3c7b65b6aed9e.pdf"}, {"id": "3a2c90e0963bfb07fc7cd1b5061383e9a99c39d2", "title": "End-to-End Deep Learning for Steering Autonomous Vehicles Considering Temporal Dependencies", "addresses": [{"address": "American University in Cairo", "lat": "30.04287695", "lng": "31.23664139", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1710.03804.pdf"}, {"id": "5b64584d6b01e66dfd0b6025b2552db1447ccdeb", "title": "Deep expectation for estimation of fingerprint orientation fields", "addresses": [{"address": "Dermalog Identification Systems, Hamburg, Germany", "lat": "53.57227000", "lng": "9.99472000", "type": "company"}, {"address": "Norwegian Biometrics Lab, NTNU, Norway", "lat": "60.78973180", "lng": "10.68219270", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272697"}, {"id": "3c4f6d24b55b1fd3c5b85c70308d544faef3f69a", "title": "A Hybrid Deep Learning Architecture for Privacy-Preserving Mobile Analytics", "addresses": [{"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/3c4f/6d24b55b1fd3c5b85c70308d544faef3f69a.pdf"}, {"id": "62fd622b3ca97eb5577fd423fb9efde9a849cbef", "title": "Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": 
"https://arxiv.org/pdf/1809.02169.pdf"}, {"id": "493c8591d6a1bef5d7b84164a73761cefb9f5a25", "title": "User Profiling through Deep Multimodal Fusion", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}, {"address": "University of Washington", "lat": "47.65432380", "lng": "-122.30800894", "type": "edu"}], "year": "2018", "pdf": "http://dl.acm.org/citation.cfm?id=3159691"}, {"id": "1648cf24c042122af2f429641ba9599a2187d605", "title": "Boosting cross-age face verification via generative age normalization", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/BTAS.2017.8272698"}, {"id": "fd53be2e0a9f33080a9db4b5a5e416e24ae8e198", "title": "Apparent Age Estimation Using Ensemble of Deep Learning Models", "addresses": [{"address": "Istanbul Technical University", "lat": "41.10427915", "lng": "29.02231159", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1606.02909.pdf"}, {"id": "cf736f596bf881ca97ec4b29776baaa493b9d50e", "title": "Low Dimensional Deep Features for facial landmark alignment", "addresses": [{"address": "Samsung R&D Institute, Bangalore, India", "lat": "12.98035370", "lng": "77.69751010", "type": "company"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7952629"}, {"id": "54bb25a213944b08298e4e2de54f2ddea890954a", "title": "AgeDB: The First Manually Collected, In-the-Wild Age Database", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017_workshops/w33/papers/Moschoglou_AgeDB_The_First_CVPR_2017_paper.pdf"}, {"id": "f7b422df567ce9813926461251517761e3e6cda0", "title": "Face aging with conditional generative adversarial networks", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1702.01983.pdf"}, {"id": "e475deadd1e284428b5e6efd8fe0e6a5b83b9dcd", "title": "Are you eligible? 
Predicting adulthood from face images via class specific mean autoencoder", "addresses": [{"address": "IIIT Delhi, India", "lat": "28.54562820", "lng": "77.27315050", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/e475/deadd1e284428b5e6efd8fe0e6a5b83b9dcd.pdf"}, {"id": "ca37933b6297cdca211aa7250cbe6b59f8be40e5", "title": "Multi-task learning for smile detection, emotion recognition and gender classification", "addresses": [{"address": "Hanoi University of Science and Technology", "lat": "21.00395200", "lng": "105.84360183", "type": "edu"}], "year": 2017, "pdf": "http://doi.acm.org/10.1145/3155133.3155207"}, {"id": "4b9ec224949c79a980a5a66664d0ac6233c3d575", "title": "Human Facial Age Estimation by Cost-Sensitive Label Ranking and Trace Norm Regularization", "addresses": [{"address": "Beijing Jiaotong University", "lat": "39.94976005", "lng": "116.33629046", "type": "edu"}, {"address": "University of Rochester", "lat": "43.15769690", "lng": "-77.58829158", "type": "edu"}, {"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7565501"}, {"id": "ebb3d5c70bedf2287f9b26ac0031004f8f617b97", "title": "Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans", "addresses": [{"address": "Electrical and Computer Engineering", "lat": "33.58667840", "lng": "-101.87539204", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/MSP.2017.2764116"}, {"id": "a713a01971e73d0c3118d0409dc7699a24f521d6", "title": "Age estimation based on face images and pre-trained convolutional neural networks", "addresses": [{"address": "Universit\u00e0 degli Studi di Milano", "lat": "45.47567215", "lng": "9.23336232", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/SSCI.2017.8285381"}, {"id": "4b61d8490bf034a2ee8aa26601d13c83ad7f843a", "title": "A Modulation Module for Multi-task Learning with Applications in Image Retrieval", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Northwestern University", "lat": "42.05511640", "lng": "-87.67581113", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.06708.pdf"}, {"id": "26a44feb7a64db7986473ca801c251aa88748477", "title": "Unsupervised Learning of Mixture Models with a Uniform Background Component", "addresses": [{"address": "Florida State University", "lat": "30.44235995", "lng": "-84.29747867", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.02744.pdf"}, {"id": "8efda5708bbcf658d4f567e3866e3549fe045bbb", "title": "Pre-trained Deep Convolutional Neural Networks for Face Recognition", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/8efd/a5708bbcf658d4f567e3866e3549fe045bbb.pdf"}, {"id": "93420d9212dd15b3ef37f566e4d57e76bb2fab2f", "title": "An All-In-One Convolutional Neural Network for Face Analysis", "addresses": [{"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1611.00851.pdf"}, {"id": "eb027969f9310e0ae941e2adee2d42cdf07d938c", "title": "VGGFace2: A Dataset for Recognising Faces across Pose and Age", "addresses": [{"address": "University 
of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1710.08092.pdf"}, {"id": "08d41d2f68a2bf0091dc373573ca379de9b16385", "title": "Recursive Chaining of Reversible Image-to-image Translators For Face Aging", "addresses": [{"address": "Aalto University", "lat": "60.18558755", "lng": "24.82427330", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.05023.pdf"}, {"id": "6cefb70f4668ee6c0bf0c18ea36fd49dd60e8365", "title": "Privacy-Preserving Deep Inference for Rich User Data on The Cloud", "addresses": [{"address": "Queen Mary University of London", "lat": "51.52472720", "lng": "-0.03931035", "type": "edu"}, {"address": "Sharif University of Technology", "lat": "35.70362270", "lng": "51.35125097", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/6cef/b70f4668ee6c0bf0c18ea36fd49dd60e8365.pdf"}, {"id": "b417b90fa0c288bbaab1aceb8ebc7ec1d3f33172", "title": "Face Aging with Contextual Generative Adversarial Nets", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "University of Trento", "lat": "46.06588360", "lng": "11.11598940", "type": "edu"}], "year": 2017, "pdf": "http://arxiv.org/abs/1802.00237"}, {"id": "854b1f0581f5d3340f15eb79452363cbf38c04c8", "title": "Directional Age-Primitive Pattern (DAPP) for Human Age Group Recognition and Age Estimation", "addresses": [{"address": "Kyung Hee University", "lat": "32.85363330", "lng": "-117.20352860", "type": "edu"}, {"address": "King Saud University", "lat": "24.72464030", "lng": "46.62335012", "type": "edu"}, {"address": "Institute of Information Technology", "lat": "23.72898990", "lng": "90.39826820", "type": "edu"}], "year": "2017", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7903648"}, {"id": "a06b6d30e2b31dc600f622ab15afe5e2929581a7", "title": "Robust Joint and Individual Variance Explained", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": 2017, "pdf": "https://ibug.doc.ic.ac.uk/media/uploads/documents/2209.pdf"}, {"id": "f519723238701849f1160d5a9cedebd31017da89", "title": "Impact of multi-focused images on recognition of soft biometric traits", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/f519/723238701849f1160d5a9cedebd31017da89.pdf"}, {"id": "3dfb822e16328e0f98a47209d7ecd242e4211f82", "title": "Cross-Age LFW: A Database for Studying Cross-Age Face Recognition in Unconstrained Environments", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1708.08197.pdf"}, {"id": "36a3a96ef54000a0cd63de867a5eb7e84396de09", "title": "Automatic Photo Orientation Detection with Convolutional Neural Networks", "addresses": [{"address": "University of Toronto", "lat": "43.66333345", "lng": "-79.39769975", "type": "edu"}], "year": 2017, "pdf": "http://www.cs.toronto.edu/~guerzhoy/oriviz/crv17.pdf"}, {"id": "47cd161546c59ab1e05f8841b82e985f72e5ddcb", "title": "Gender classification in live videos", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": 2017, "pdf": 
"https://doi.org/10.1109/ICIP.2017.8296552"}, {"id": "70569810e46f476515fce80a602a210f8d9a2b95", "title": "Apparent Age Estimation from Face Images Combining General and Children-Specialized Deep Learning Models", "addresses": [{"address": "EURECOM", "lat": "43.61438600", "lng": "7.07112500", "type": "edu"}], "year": 2016, "pdf": "http://doi.ieeecomputersociety.org/10.1109/CVPRW.2016.105"}, {"id": "d00e9a6339e34c613053d3b2c132fccbde547b56", "title": "A cascaded convolutional neural network for age estimation of unconstrained faces", "addresses": [{"address": "State University of New Jersey", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7791154"}, {"id": "93af335bf8c610f34ce0cadc15d1dd592debc706", "title": "Auxiliary Demographic Information Assisted Age Estimation With Cascaded Structure", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8267475"}, {"id": "a16fb74ea66025d1f346045fda00bd287c20af0e", "title": "A Coupled Evolutionary Network for Age Estimation", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.07447.pdf"}, {"id": "7fab17ef7e25626643f1d55257a3e13348e435bd", "title": "Age Progression/Regression by Conditional Adversarial Autoencoder", "addresses": [{"address": "University of Tennessee", "lat": "35.95424930", "lng": "-83.93073950", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1702.08423.pdf"}, {"id": "2149d49c84a83848d6051867290d9c8bfcef0edb", "title": "Label-Sensitive Deep Metric Learning for Facial Age Estimation", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/TIFS.2017.2746062"}, {"id": "0951f42abbf649bb564a21d4ff5dddf9a5ea54d9", "title": "Joint Estimation of Age and Gender from Unconstrained Face Images Using Lightweight Multi-Task CNN for Mobile Applications", "addresses": [{"address": "Institute of Information Science", "lat": "25.04107280", "lng": "121.61475620", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.02023.pdf"}, {"id": "5364e58ba1f4cdfcffb247c2421e8f56a75fad8d", "title": "Facial age estimation through self-paced learning", "addresses": [{"address": "East China Normal University", "lat": "31.22849230", "lng": "121.40211389", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/VCIP.2017.8305113"}, {"id": "7143518f847b0ec57a0ff80e0304c89d7e924d9a", "title": "Speeding-Up Age Estimation in Intelligent Demographics System via Network Optimization", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "Hefei University of Technology", "lat": "31.84691800", "lng": "117.29053367", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.08373.pdf"}, {"id": "6feafc5c1d8b0e9d65ebe4c1512b7860c538fbdc", "title": "Smart Facial Age Estimation with Stacked Deep Network Fusion", "addresses": [{"address": "National Chung Hsing University", "lat": "24.12084345", 
"lng": "120.67571165", "type": "edu"}, {"address": "National Taichung University of Science and Technology", "lat": "24.15031065", "lng": "120.68325501", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8448885"}, {"id": "ec5c63609cf56496715b0eba0e906de3231ad6d1", "title": "Private and Scalable Personal Data Analytics Using Hybrid Edge-to-Cloud Deep Learning", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Queen Mary University of London", "lat": "51.52472720", "lng": "-0.03931035", "type": "edu"}, {"address": "Sharif University of Technology", "lat": "35.70362270", "lng": "51.35125097", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8364651"}, {"id": "975978ee6a32383d6f4f026b944099e7739e5890", "title": "Privacy-Preserving Age Estimation for Content Rating", "addresses": [{"address": "Simon Fraser University", "lat": "49.27674540", "lng": "-122.91777375", "type": "edu"}, {"address": "University of Manitoba", "lat": "49.80915360", "lng": "-97.13304179", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/9759/78ee6a32383d6f4f026b944099e7739e5890.pdf"}, {"id": "64ec0c53dd1aa51eb15e8c2a577701e165b8517b", "title": "Online Regression with Feature Selection in Stochastic Data Streams", "addresses": [{"address": "Florida State University", "lat": "30.44235995", "lng": "-84.29747867", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1803.11521.pdf"}]} \ No newline at end of file
diff --git a/site/datasets/final/lfw.json b/site/datasets/final/lfw.json
new file mode 100644
index 00000000..07820167
--- /dev/null
+++ b/site/datasets/final/lfw.json
@@ -0,0 +1 @@
+{"id": "2d3482dcff69c7417c7b933f22de606a0e8e42d4", "paper": {"paper_id": "2d3482dcff69c7417c7b933f22de606a0e8e42d4", "key": "lfw", "title": "Labeled Faces in the Wild : Updates and New Reporting Procedures", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf", "address": {"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}, "name": "LFW"}, "address": {"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}, "additional_papers": [{"paper_id": "2d3482dcff69c7417c7b933f22de606a0e8e42d4", "key": "lfw", "title": "Labeled Faces in the Wild : Updates and New Reporting Procedures", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf", "address": {"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}, "name": "LFW"}, {"paper_id": "2d3482dcff69c7417c7b933f22de606a0e8e42d4", "key": "lfw", "title": "Labeled Faces in the Wild : Updates and New Reporting Procedures", "year": 2014, "pdf": "http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf", "address": {"address": "University of Massachusetts", "lat": "42.38897850", "lng": "-72.52869870", "type": "edu"}, "name": "LFW"}], "citations": [{"id": "71ca8b6e84c17b3e68f980bfb8cddc837100f8bf", "title": "Effective 3D based frontalization for unconstrained face recognition", "addresses": [{"address": "University of Florence", "lat": "43.77764260", "lng": "11.25976500", "type": "edu"}], "year": "2016", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7899774"}, {"id": "bd8f77b7d3b9d272f7a68defc1412f73e5ac3135", "title": "SphereFace: Deep Hypersphere Embedding for Face Recognition", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}, {"address": "Georgia Institute of Technology", "lat": "33.77603300", "lng": "-84.39884086", "type": "edu"}, {"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.08063.pdf"}, {"id": "81884e1de00e59f24bc20254584d73a1a1806933", "title": "Super-Identity Convolutional Neural Network for Face Hallucination", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}, {"address": "SenseTime", "lat": "39.99300800", "lng": "116.32988200", "type": "company"}, {"address": "University of Texas at Austin", "lat": "30.28415100", "lng": "-97.73195598", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1811.02328.pdf"}, {"id": "5180df9d5eb26283fb737f491623395304d57497", "title": "Scalable Angular Discriminative Deep Metric Learning for Face Recognition", "addresses": [{"address": "Tianjin University", "lat": "36.20304395", "lng": "117.05842113", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.10899.pdf"}, {"id": "3b64efa817fd609d525c7244a0e00f98feacc8b4", "title": "A Comprehensive Survey on Pose-Invariant Face Recognition", "addresses": [{"address": "University of Technology Sydney", "lat": "-33.88096510", "lng": "151.20107299", "type": "edu"}], "year": 2016, "pdf": "http://doi.acm.org/10.1145/2845089"}, {"id": "84ae55603bffda40c225fe93029d39f04793e01f", "title": "ICB-RW 2016: International challenge on biometric recognition in the wild", "addresses": [{"address": "University of Beira Interior", "lat": "40.27730770", "lng": 
"-7.50958010", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICB.2016.7550066"}, {"id": "e1630014a5ae3d2fb7ff6618f1470a567f4d90f5", "title": "Look, Listen and Learn - A Multimodal LSTM for Speaker Identification", "addresses": [{"address": "University of Hong Kong", "lat": "22.20814690", "lng": "114.25964115", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1602.04364.pdf"}, {"id": "32d8e555441c47fc27249940991f80502cb70bd5", "title": "Machine Learning Models that Remember Too Much", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": 2017, "pdf": "https://arxiv.org/pdf/1709.07886v1.pdf"}, {"id": "55089f9bc858ae7e9addf30502ac11be4347c05a", "title": "A Privacy-Preserving Deep Learning Approach for Face Recognition with Edge Computing", "addresses": [{"address": "Nanjing University", "lat": "32.05659570", "lng": "118.77408833", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/5508/9f9bc858ae7e9addf30502ac11be4347c05a.pdf"}, {"id": "17423fe480b109e1d924314c1dddb11b084e8a42", "title": "Deep Disguised Faces Recognition", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/1742/3fe480b109e1d924314c1dddb11b084e8a42.pdf"}, {"id": "1e1d7cbbef67e9e042a3a0a9a1bcefcc4a9adacf", "title": "A Multi-level Contextual Model for Person Recognition in Photo Albums", "addresses": [{"address": "Stevens Institute of Technology", "lat": "40.74225200", "lng": "-74.02709490", "type": "edu"}], "year": 2016, "pdf": "http://personal.stevens.edu/~hli18//data/papers/CVPR2016_CameraReady.pdf"}, {"id": "8c66378df977606d332fc3b0047989e890a6ac76", "title": "Hierarchical-PEP model for real-world face recognition", "addresses": [{"address": "Stevens Institute of Technology", "lat": "40.74225200", "lng": "-74.02709490", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2015/ext/2B_078_ext.pdf"}, {"id": "72a7eb68f0955564e1ceafa75aeeb6b5bbb14e7e", "title": "Face Recognition with Contrastive Convolution", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/72a7/eb68f0955564e1ceafa75aeeb6b5bbb14e7e.pdf"}, {"id": "345cc31c85e19cea9f8b8521be6a37937efd41c2", "title": "Deep Manifold Traversal: Changing Labels with Convolutional Features", "addresses": [{"address": "Cornell University", "lat": "42.45055070", "lng": "-76.47835130", "type": "edu"}], "year": "2015", "pdf": "https://arxiv.org/pdf/1511.06421.pdf"}, {"id": "3c563542db664321aa77a9567c1601f425500f94", "title": "TV-GAN: Generative Adversarial Network Based Thermal to Visible Face Recognition", "addresses": [{"address": "University of Queensland", "lat": "-27.49741805", "lng": "153.01316956", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1712.02514.pdf"}, {"id": "86afb1e38a96f2ac00e792ef353a971fd13c8474", "title": "How interesting images are: An atypicality approach for social networks", "addresses": [{"address": "University of Hawaii", "lat": "21.29827950", "lng": "-157.81869230", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/BigData.2016.7840742"}, {"id": "565f7c767e6b150ebda491e04e6b1de759fda2d4", "title": "Fine-grained face 
verification: FGLFW database, baselines, and human-DCMN partnership", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1016/j.patcog.2016.11.023"}, {"id": "492f41e800c52614c5519f830e72561db205e86c", "title": "A Deep Regression Architecture with Two-Stage Re-initialization for High Performance Facial Landmark Detection", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}, {"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": 2017, "pdf": "http://openaccess.thecvf.com/content_cvpr_2017/papers/Lv_A_Deep_Regression_CVPR_2017_paper.pdf"}, {"id": "2f16baddac6af536451b3216b02d3480fc361ef4", "title": "Web-scale training for face identification", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2015, "pdf": "http://cs.nyu.edu/~fergus/teaching/vision/10_facerec.pdf"}, {"id": "b2e308649c7a502456a8e3c95ac7fbe6f8216e51", "title": "Recurrent Regression for Face Recognition", "addresses": [{"address": "Southeast University", "lat": "32.05752790", "lng": "118.78682252", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/b2e3/08649c7a502456a8e3c95ac7fbe6f8216e51.pdf"}, {"id": "06c956d4aac65752672ce4bd5a379f10a7fd6148", "title": "Stacking PCANet +: An Overly Simplified ConvNets Baseline for Face Recognition", "addresses": [{"address": "Yonsei University", "lat": "37.56004060", "lng": "126.93692480", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/LSP.2017.2749763"}, {"id": "a06b6d30e2b31dc600f622ab15afe5e2929581a7", "title": "Robust Joint and Individual Variance Explained", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}, {"address": "Middlesex University", "lat": "51.59029705", "lng": "-0.22963221", "type": "edu"}], "year": 2017, "pdf": "https://ibug.doc.ic.ac.uk/media/uploads/documents/2209.pdf"}, {"id": "4cfd770ccecae1c0b4248bc800d7fd35c817bbbd", "title": "A Discriminative Feature Learning Approach for Deep Face Recognition", "addresses": [{"address": "Shenzhen Institutes of Advanced Technology", "lat": "22.59805605", "lng": "113.98533784", "type": "edu"}, {"address": "Chinese University of Hong Kong", "lat": "22.42031295", "lng": "114.20788644", "type": "edu"}], "year": "2016", "pdf": "https://pdfs.semanticscholar.org/8774/e206564df3bf9050f8c2be6b434cc2469c5b.pdf"}, {"id": "f92ade569cbe54344ffd3bb25efd366dcd8ad659", "title": "Effect of Super Resolution on High Dimensional Features for Unsupervised Face Recognition in the Wild", "addresses": [{"address": "University of Bridgeport", "lat": "41.16648580", "lng": "-73.19205640", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.01464.pdf"}, {"id": "e9a5a38e7da3f0aa5d21499149536199f2e0e1f7", "title": "A Bayesian Scene-Prior-Based Deep Network Model for Face Verification", "addresses": [{"address": "Curtin University", "lat": "-32.00686365", "lng": "115.89691775", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/e9a5/a38e7da3f0aa5d21499149536199f2e0e1f7.pdf"}, {"id": "63a6c256ec2cf2e0e0c9a43a085f5bc94af84265", "title": "Complexity of multiverse networks and their multilayer generalization", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", 
"type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICPR.2016.7899662"}, {"id": "bc910ca355277359130da841a589a36446616262", "title": "Conditional High-Order Boltzmann Machine: A Supervised Learning Model for Relation Learning", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2015, "pdf": "http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Huang_Conditional_High-Order_Boltzmann_ICCV_2015_paper.pdf"}, {"id": "344a5802999dddd0a6d1c4d511910af2eb922231", "title": "DroneFace: An Open Dataset for Drone Research", "addresses": [{"address": "Feng Chia University", "lat": "24.18005755", "lng": "120.64836072", "type": "edu"}], "year": 2017, "pdf": "http://pdfs.semanticscholar.org/f0ba/552418698d1b881c6f9f02e2c84f969e66f3.pdf"}, {"id": "0be418e63d111e3b94813875f75909e4dc27d13a", "title": "Fine-grained LFW database", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": 2016, "pdf": "https://doi.org/10.1109/ICB.2016.7550057"}, {"id": "16b9d258547f1eccdb32111c9f45e2e4bbee79af", "title": "NormFace: L2 Hypersphere Embedding for Face Verification", "addresses": [{"address": "University of Electronic Science and Technology of China", "lat": "40.01419050", "lng": "-83.03091430", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1704.06369.pdf"}, {"id": "291265db88023e92bb8c8e6390438e5da148e8f5", "title": "MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition", "addresses": [{"address": "Microsoft", "lat": "47.64233180", "lng": "-122.13693020", "type": "company"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf"}, {"id": "a73405038fdc0d8bf986539ef755a80ebd341e97", "title": "Conditional High-Order Boltzmann Machines for Supervised Relation Learning", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/TIP.2017.2698918"}, {"id": "47cd161546c59ab1e05f8841b82e985f72e5ddcb", "title": "Gender classification in live videos", "addresses": [{"address": "University of Science and Technology of China", "lat": "31.83907195", "lng": "117.26420748", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296552"}, {"id": "93eb3963bc20e28af26c53ef3bce1e76b15e3209", "title": "Occlusion robust face recognition based on mask learning", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2017, "pdf": "https://doi.org/10.1109/ICIP.2017.8296992"}, {"id": "5922e26c9eaaee92d1d70eae36275bb226ecdb2e", "title": "Boosting Classification Based Similarity Learning by using Standard Distances", "addresses": [{"address": "Universitat de Val\u00e8ncia", "lat": "39.47787665", "lng": "-0.34257711", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/5922/e26c9eaaee92d1d70eae36275bb226ecdb2e.pdf"}, {"id": "a52a69bf304d49fba6eac6a73c5169834c77042d", "title": "Margin Loss: Making Faces More Separable", "addresses": [{"address": "Tsinghua University", "lat": "40.00229045", "lng": "116.32098908", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/LSP.2017.2789251"}, {"id": "1fe121925668743762ce9f6e157081e087171f4c", "title": "Unsupervised learning of overcomplete face descriptors", "addresses": [{"address": "University of Oulu", "lat": 
"65.05921570", "lng": "25.46632601", "type": "edu"}], "year": 2015, "pdf": "https://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W02/papers/Ylioinas_Unsupervised_Learning_of_2015_CVPR_paper.pdf"}, {"id": "628a3f027b7646f398c68a680add48c7969ab1d9", "title": "Plan for Final Year Project : HKU-Face : A Large Scale Dataset for Deep Face Recognition", "addresses": [{"address": "Facebook", "lat": "37.39367170", "lng": "-122.08072620", "type": "company"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/628a/3f027b7646f398c68a680add48c7969ab1d9.pdf"}, {"id": "740e095a65524d569244947f6eea3aefa3cca526", "title": "Towards Human-like Performance Face Detection: A Convolutional Neural Network Approach", "addresses": [{"address": "University of Twente", "lat": "52.23801390", "lng": "6.85667610", "type": "edu"}], "year": 2016, "pdf": "http://pdfs.semanticscholar.org/740e/095a65524d569244947f6eea3aefa3cca526.pdf"}, {"id": "7859667ed6c05a467dfc8a322ecd0f5e2337db56", "title": "Web-Scale Transfer Learning for Unconstrained 1:N Face Identification", "addresses": [{"address": "Tel Aviv University", "lat": "32.11198890", "lng": "34.80459702", "type": "edu"}], "year": 2015, "pdf": "http://pdfs.semanticscholar.org/7859/667ed6c05a467dfc8a322ecd0f5e2337db56.pdf"}, {"id": "afdf9a3464c3b015f040982750f6b41c048706f5", "title": "A Recurrent Encoder-Decoder Network for Sequential Face Alignment", "addresses": [{"address": "Rutgers University", "lat": "40.47913175", "lng": "-74.43168868", "type": "edu"}], "year": "2016", "pdf": "https://arxiv.org/pdf/1608.05477.pdf"}, {"id": "c222f8079c246ead285894c47bdbb2dfc7741044", "title": "Face de-identification with expressions preservation", "addresses": [{"address": "Bordeaux INP, France", "lat": "44.80557160", "lng": "-0.60519720", "type": "edu"}, {"address": "University of Bordeaux, France", "lat": "44.80837500", "lng": "-0.59670500", "type": "edu"}], "year": 2015, "pdf": "https://doi.org/10.1109/ICIP.2015.7351631"}, {"id": "1e6ed6ca8209340573a5e907a6e2e546a3bf2d28", "title": "Pooling Faces: Template Based Face Recognition with Pooled Face Images", "addresses": [{"address": "Open University of Israel", "lat": "32.77824165", "lng": "34.99565673", "type": "edu"}], "year": 2016, "pdf": "http://arxiv.org/pdf/1607.01450v1.pdf"}, {"id": "600f164c81dbaa0327e7bd659fd9eb7f511f9e9a", "title": "A benchmark study of large-scale unconstrained face recognition", "addresses": [{"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": 2014, "pdf": "https://doi.org/10.1109/BTAS.2014.6996301"}, {"id": "adaed4e92c93eb005198e41f87cf079e46050b5a", "title": "Discriminative Invariant Kernel Features: A Bells-and-Whistles-Free Approach to Unsupervised Face Recognition and Pose Estimation", "addresses": [{"address": "Carnegie Mellon University", "lat": "37.41021930", "lng": "-122.05965487", "type": "edu"}], "year": 2016, "pdf": "http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Pal_Discriminative_Invariant_Kernel_CVPR_2016_paper.pdf"}, {"id": "29db046dd1f8100b279c3f5f5c5ef19bdbf5af9a", "title": "Recent Progress of Face Image Synthesis", "addresses": [{"address": "University of Chinese Academy of Sciences", "lat": "39.90828040", "lng": "116.24585270", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1706.04717.pdf"}, {"id": "7788fa76f1488b1597ee2bebc462f628e659f61e", "title": "A Privacy-Aware Architecture at the Edge for Autonomous Real-Time Identity Reidentification in Crowds", "addresses": 
[{"address": "University of Texas at San Antonio", "lat": "29.58333105", "lng": "-98.61944505", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8063888"}]} \ No newline at end of file
diff --git a/site/datasets/final/megaface.json b/site/datasets/final/megaface.json
new file mode 100644
index 00000000..1ec269c4
--- /dev/null
+++ b/site/datasets/final/megaface.json
@@ -0,0 +1 @@
+{"id": "28d4e027c7e90b51b7d8908fce68128d1964668a", "paper": {"paper_id": "28d4e027c7e90b51b7d8908fce68128d1964668a", "key": "megaface", "title": "Level Playing Field for Million Scale Face Recognition", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.00393.pdf", "address": "", "name": "MegaFace"}, "address": "", "additional_papers": [{"paper_id": "28d4e027c7e90b51b7d8908fce68128d1964668a", "key": "megaface", "title": "Level Playing Field for Million Scale Face Recognition", "year": "2017", "pdf": "https://arxiv.org/pdf/1705.00393.pdf", "address": "", "name": "MegaFace"}], "citations": [{"id": "f3a59d85b7458394e3c043d8277aa1ffe3cdac91", "title": "Query-Free Attacks on Industry-Grade Face Recognition Systems under Resource Constraints", "addresses": [{"address": "Indiana University", "lat": "39.86948105", "lng": "-84.87956905", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1802.09900.pdf"}, {"id": "8efda5708bbcf658d4f567e3866e3549fe045bbb", "title": "Pre-trained Deep Convolutional Neural Networks for Face Recognition", "addresses": [{"address": "University of Groningen", "lat": "53.21967825", "lng": "6.56251482", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/8efd/a5708bbcf658d4f567e3866e3549fe045bbb.pdf"}, {"id": "9f65319b8a33c8ec11da2f034731d928bf92e29d", "title": "Taking Roll: a Pipeline for Face Recognition", "addresses": [{"address": "Louisiana State University", "lat": "30.40550035", "lng": "-91.18620474", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/9f65/319b8a33c8ec11da2f034731d928bf92e29d.pdf"}, {"id": "b59cee1f647737ec3296ccb3daa25c890359c307", "title": "Continuously Reproducing Toolchains in Pattern Recognition and Machine Learning Experiments", "addresses": [{"address": "IDIAP Research Institute", "lat": "46.10923700", "lng": "7.08453549", "type": "edu"}], "year": "2017", "pdf": "https://pdfs.semanticscholar.org/b59c/ee1f647737ec3296ccb3daa25c890359c307.pdf"}, {"id": "66dcd855a6772d2731b45cfdd75f084327b055c2", "title": "Quality Classified Image Analysis with Application to Face Detection and Recognition", "addresses": [{"address": "Shenzhen University", "lat": "22.53521465", "lng": "113.93159110", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/66dc/d855a6772d2731b45cfdd75f084327b055c2.pdf"}, {"id": "841855205818d3a6d6f85ec17a22515f4f062882", "title": "Low Resolution Face Recognition in the Wild", "addresses": [{"address": "University of Notre Dame", "lat": "41.70456775", "lng": "-86.23822026", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.11529.pdf"}, {"id": "1275d6a800f8cf93c092603175fdad362b69c191", "title": "Deep Face Recognition: A Survey", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.06655.pdf"}, {"id": "ebb3d5c70bedf2287f9b26ac0031004f8f617b97", "title": "Deep Learning for Understanding Faces: Machines May Be Just as Good, or Better, than Humans", "addresses": [{"address": "Electrical and Computer Engineering", "lat": "33.58667840", "lng": "-101.87539204", "type": "edu"}, {"address": "University of Maryland", "lat": "39.28996850", "lng": "-76.62196103", "type": "edu"}], "year": 2018, "pdf": "https://doi.org/10.1109/MSP.2017.2764116"}, {"id": "626913b8fcbbaee8932997d6c4a78fe1ce646127", "title": "Learning from Millions of 3D Scans for Large-scale 3D Face Recognition", "addresses": [{"address": "University of Western 
Australia", "lat": "-31.95040445", "lng": "115.79790037", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1711.05942.pdf"}, {"id": "9865fe20df8fe11717d92b5ea63469f59cf1635a", "title": "Wildest Faces: Face Detection and Recognition in Violent Settings", "addresses": [{"address": "Hacettepe University", "lat": "39.86742125", "lng": "32.73519072", "type": "edu"}, {"address": "Middle East Technical University", "lat": "39.87549675", "lng": "32.78553506", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.07566.pdf"}, {"id": "47190d213caef85e8b9dd0d271dbadc29ed0a953", "title": "The Devil of Face Recognition is in the Noise", "addresses": [{"address": "Nanyang Technological University", "lat": "1.34841040", "lng": "103.68297965", "type": "edu"}, {"address": "University of California, San Diego", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.11649.pdf"}]} \ No newline at end of file
diff --git a/site/datasets/final/umd_faces.json b/site/datasets/final/umd_faces.json
new file mode 100644
index 00000000..0e0583cf
--- /dev/null
+++ b/site/datasets/final/umd_faces.json
@@ -0,0 +1 @@
+{"id": "447d8893a4bdc29fa1214e53499ffe67b28a6db5", "paper": {"paper_id": "447d8893a4bdc29fa1214e53499ffe67b28a6db5", "key": "umd_faces", "title": "Electronic Transport in Quantum Confined Systems", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf", "address": "", "name": "UMD"}, "address": "", "additional_papers": [{"paper_id": "447d8893a4bdc29fa1214e53499ffe67b28a6db5", "key": "umd_faces", "title": "Electronic Transport in Quantum Confined Systems", "year": 2008, "pdf": "http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf", "address": "", "name": "UMD"}], "citations": []} \ No newline at end of file
diff --git a/site/datasets/final/vgg_faces2.json b/site/datasets/final/vgg_faces2.json
new file mode 100644
index 00000000..b25051ef
--- /dev/null
+++ b/site/datasets/final/vgg_faces2.json
@@ -0,0 +1 @@
+{"id": "eb027969f9310e0ae941e2adee2d42cdf07d938c", "paper": {"paper_id": "eb027969f9310e0ae941e2adee2d42cdf07d938c", "key": "vgg_faces2", "title": "VGGFace2: A Dataset for Recognising Faces across Pose and Age", "year": "2018", "pdf": "https://arxiv.org/pdf/1710.08092.pdf", "address": {"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}, "name": "VGG Face2"}, "address": {"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}, "additional_papers": [], "citations": [{"id": "0b6314e9e741d19346d936eaaa7d6fcf46dd3ed7", "title": "Deep Learning in the Wild", "addresses": [{"address": "Ulm University", "lat": "48.38044335", "lng": "10.01010115", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.04950.pdf"}, {"id": "cd6aaa37fffd0b5c2320f386be322b8adaa1cc68", "title": "Deep Face Recognition: A Survey", "addresses": [{"address": "Beijing University of Posts and Telecommunications", "lat": "39.96014880", "lng": "116.35193921", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.06655.pdf"}, {"id": "44e6ce12b857aeade03a6e5d1b7fb81202c39489", "title": "VoxCeleb2: Deep Speaker Recognition", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1806.05622.pdf"}, {"id": "809ea255d144cff780300440d0f22c96e98abd53", "title": "ArcFace: Additive Angular Margin Loss for Deep Face Recognition", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/809e/a255d144cff780300440d0f22c96e98abd53.pdf"}, {"id": "361eaef45fccfffd5b7df12fba902490a7d24a8d", "title": "Robust deep learning features for face recognition under mismatched conditions", "addresses": [{"address": "Istanbul Technical University", "lat": "41.10427915", "lng": "29.02231159", "type": "edu"}], "year": "2018", "pdf": "http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8404319"}, {"id": "f1245d318eb3d775e101355f5f085a9bc4a0339b", "title": "Face Verification with Disguise Variations via Deep Disguise", "addresses": [{"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/f124/5d318eb3d775e101355f5f085a9bc4a0339b.pdf"}, {"id": "9fb93b7c2bae866608f26c4254e5bd69cc5031d6", "title": "Fast Geometrically-Perturbed Adversarial Faces", "addresses": [{"address": "West Virginia University", "lat": "39.65404635", "lng": "-79.96475355", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.08999.pdf"}, {"id": "cca9ae621e8228cfa787ec7954bb375536160e0d", "title": "Learning to Collaborate for User-Controlled Privacy", "addresses": [{"address": "Duke University", "lat": "35.99905220", "lng": "-78.92906290", "type": "edu"}, {"address": "University College London", "lat": "51.52316070", "lng": "-0.12820370", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.07410.pdf"}, {"id": "fca9ebaa30d69ccec8bb577c31693c936c869e72", "title": "Look Across Elapse: Disentangled Representation Learning and Photorealistic Cross-Age Face Synthesis for Age-Invariant Face Recognition", "addresses": [{"address": "National University of Singapore", "lat": "1.29620180", "lng": "103.77689944", "type": "edu"}, {"address": "Chinese Academy of Sciences", "lat": "40.00447950", "lng": "116.37023800", "type": "edu"}], "year": "2018", "pdf": 
"https://arxiv.org/pdf/1809.00338.pdf"}, {"id": "61efeb64e8431cfbafa4b02eb76bf0c58e61a0fa", "title": "Merging datasets through deep learning", "addresses": [{"address": "IBM Research, North Carolina", "lat": "35.90422720", "lng": "-78.85565763", "type": "company"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1809.01604.pdf"}, {"id": "4e8b61165c8908284619acc62c46c7afac85d8a0", "title": "Deep unsupervised multi-view detection of video game stream highlights", "addresses": [{"address": "University of London", "lat": "51.52176680", "lng": "-0.13019072", "type": "edu"}, {"address": "London, United Kingdom", "lat": "51.50732190", "lng": "-0.12764740", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1807.09715.pdf"}, {"id": "313d5eba97fe064bdc1f00b7587a4b3543ef712a", "title": "Compact Deep Aggregation for Set Retrieval", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/cb7f/93467b0ec1afd43d995e511f5d7bf052a5af.pdf"}, {"id": "05455f5e3c3989be4991cb74b73cdfd0d6522622", "title": "Learning Warped Guidance for Blind Face Restoration", "addresses": [{"address": "Harbin Institute of Technology", "lat": "45.74139210", "lng": "126.62552755", "type": "edu"}, {"address": "Sun Yat-Sen University", "lat": "23.09461185", "lng": "113.28788994", "type": "edu"}, {"address": "University of Kentucky", "lat": "38.03337420", "lng": "-84.50177580", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.04829.pdf"}, {"id": "486c9a0e5eb1e0bf107c31c2bf9689b25e18383b", "title": "Face Recognition: Primates in the Wild", "addresses": [{"address": "Michigan State University", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1804.08790.pdf"}, {"id": "540831094fd9b80469c8dacb9320b7e342b50e03", "title": "Emotion Recognition in Speech using Cross-Modal Transfer in the Wild", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1808.05561.pdf"}, {"id": "29c340c83b3bbef9c43b0c50b4d571d5ed037cbd", "title": "Stacked Dense U-Nets with Dual Transformers for Robust Face Alignment", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": "2018", "pdf": "https://pdfs.semanticscholar.org/29c3/40c83b3bbef9c43b0c50b4d571d5ed037cbd.pdf"}, {"id": "9865fe20df8fe11717d92b5ea63469f59cf1635a", "title": "Wildest Faces: Face Detection and Recognition in Violent Settings", "addresses": [{"address": "Hacettepe University", "lat": "39.86742125", "lng": "32.73519072", "type": "edu"}, {"address": "Middle East Technical University", "lat": "39.87549675", "lng": "32.78553506", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1805.07566.pdf"}, {"id": "5812d8239d691e99d4108396f8c26ec0619767a6", "title": "GhostVLAD for set-based face recognition", "addresses": [{"address": "University of Oxford", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu"}], "year": "2018", "pdf": "https://arxiv.org/pdf/1810.09951.pdf"}, {"id": "17423fe480b109e1d924314c1dddb11b084e8a42", "title": "Deep Disguised Faces Recognition", "addresses": [{"address": "National Taiwan University", "lat": "25.01682835", "lng": "121.53846924", "type": "edu"}], "year": "", "pdf": "https://pdfs.semanticscholar.org/1742/3fe480b109e1d924314c1dddb11b084e8a42.pdf"}, {"id": 
"9f65319b8a33c8ec11da2f034731d928bf92e29d", "title": "Taking Roll: a Pipeline for Face Recognition", "addresses": [{"address": "Louisiana State University", "lat": "30.40550035", "lng": "-91.18620474", "type": "edu"}], "year": 2018, "pdf": "http://pdfs.semanticscholar.org/9f65/319b8a33c8ec11da2f034731d928bf92e29d.pdf"}, {"id": "8e0ab1b08964393e4f9f42ca037220fe98aad7ac", "title": "UV-GAN: Adversarial Facial UV Map Completion for Pose-invariant Face Recognition", "addresses": [{"address": "Imperial College London", "lat": "51.49887085", "lng": "-0.17560797", "type": "edu"}], "year": "2017", "pdf": "https://arxiv.org/pdf/1712.04695.pdf"}]} \ No newline at end of file