Diffstat (limited to 'scraper')
-rw-r--r--   scraper/reports/doi_institutions_geocoded.csv   2
-rw-r--r--   scraper/reports/report_coverage.html            2
-rw-r--r--   scraper/reports/report_index.html               2
3 files changed, 3 insertions, 3 deletions
diff --git a/scraper/reports/doi_institutions_geocoded.csv b/scraper/reports/doi_institutions_geocoded.csv
index b7a6dc0f..42933088 100644
--- a/scraper/reports/doi_institutions_geocoded.csv
+++ b/scraper/reports/doi_institutions_geocoded.csv
@@ -6725,7 +6725,7 @@ c7835d080337efb17a174f175c0fd889a255578b,Unsupervised learning of face detection
c7ee0eca1a9af0a2ce31d5a745e49a638bd5d59b,A robust invariant bipolar representation for R 3 surfaces: applied to the face description,La Manouba University,"GRIFT Research Group, CRISTAL Laboratory, Ecole Nationale des Sciences de l’Informatique (ENSI), La Manouba University, La Manouba, Tunisia","Campus Universitaire de la Manouba، Manouba 2010, Tunisia",36.81374960,10.06376590,edu,,Tunisia
c77bf2471d578707114d50d3e426df9aed70d1f0,Monocular optical flow navigation using sparse SURF flow with multi-layer bucketing screener,Beihang University,Beihang University,"北京航空航天大学, 37, 学院路, 五道口, 后八家, 海淀区, 100083, 中国",39.98083330,116.34101249,edu,,China
c7c0da2219c4324c5e8d5bc062c499286489c437,GEFE: genetic & evolutionary feature extraction for periocular-based biometric recognition,North Carolina A&T State University,"North Carolina A&T State University, Greensboro, NC 27411, USA","120, Gibbs Hall, 1601 E Market St, Greensboro, NC 27411, United States",36.07211420,-79.77448420,edu,,United States
-f28ef0a61a45a8b9cd03aa0ca81863e1d54a31d1,An accurate eye pupil localization approach based on adaptive gradient boosting decision tree,"State Grid Shanghai Electric Power Company, Shanghai, China","Electric Power Research Institute, State Grid Shanghai Electric Power Company Shanghai, 200093, China","Shanghai, China",31.23039040,121.47370210,edu,,China
+f28ef0a61a45a8b9cd03aa0ca81863e1d54a31d1,An accurate eye pupil localization approach based on adaptive gradient boosting decision tree,"State Grid Shanghai Electric Power Company, Shanghai, China","Electric Power Research Institute, State Grid Shanghai Electric Power Company Shanghai, 200093, China","Shanghai, China",31.23039040,121.47370210,company,,China
f2ba98d57341a11a7c193cf7b4e82d22947644b2,Discriminant-component eigenfaces for privacy-preserving face recognition,Iowa State University,Iowa State University,"Iowa State University, Farm House Road, Ames, Story County, Iowa, 50014, USA",42.02791015,-93.64464415,edu,,United States
f24e13dd7b70daaeca7a7395cd83c2ece4587b20,Art Critic: Multisignal Vision and Speech Interaction System in a Gaming Context,SUNY Binghamton,State University of New York at Binghamton,"State University of New York at Binghamton, East Drive, Hinman, Willow Point, Vestal Town, Broome County, New York, 13790, USA",42.08779975,-75.97066066,edu,,United States
f23cdb221627cab6d0804e58cefc0d55a7d9a97f,Experimental Evaluation of Matching-Score Normalization Techniques on Different Multimodal Biometric Systems,University of Zagreb,"University of Zagreb, Faculty of Electrical Engineering and Computing, Croatia","Unska ul. 3, 10000, Zagreb, Croatia",45.80112100,15.97084090,edu,,Croatia
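For context on the hunk above: the only change is that the institution-type field for the State Grid Shanghai entry is flipped from edu to company. Below is a minimal sketch of how similar misclassifications might be flagged in doi_institutions_geocoded.csv; the column order is inferred from the rows shown in this hunk, and the keyword list (CORPORATE_HINTS) is purely illustrative, not part of the scraper.

    # Minimal sketch (not part of this repo): scan doi_institutions_geocoded.csv for
    # rows whose institution text looks corporate but whose type field still says
    # "edu" -- the kind of correction made in this commit.
    # Assumed column order, inferred from the hunk above:
    #   paper_id, title, institution, affiliation, address, lat, lng, type, <unused>, country
    import csv

    CORPORATE_HINTS = ("company", "corporation", "inc.", "ltd", "llc", "gmbh")  # illustrative

    def suspect_rows(path="scraper/reports/doi_institutions_geocoded.csv"):
        with open(path, newline="", encoding="utf-8") as fh:
            for row in csv.reader(fh):
                if len(row) < 10:
                    continue  # skip short or malformed rows
                paper_id, institution, inst_type = row[0], row[2], row[7]
                if inst_type == "edu" and any(h in institution.lower() for h in CORPORATE_HINTS):
                    yield paper_id, institution

    if __name__ == "__main__":
        for paper_id, institution in suspect_rows():
            print(f"{paper_id}: tagged edu but institution reads as corporate: {institution}")

The row changed above ("State Grid Shanghai Electric Power Company", previously tagged edu) is exactly the kind of entry such a check would have surfaced before this commit.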
diff --git a/scraper/reports/report_coverage.html b/scraper/reports/report_coverage.html
index 1a91fa35..ecee4212 100644
--- a/scraper/reports/report_coverage.html
+++ b/scraper/reports/report_coverage.html
@@ -1 +1 @@
-<!doctype html><html><head><meta charset='utf-8'><title>Coverage</title><link rel='stylesheet' href='reports.css'></head><body><h2>Coverage</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Country</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html" target="_blank">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>607</td><td>392</td><td>56</td><td>628</td><td>362</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html" target="_blank">Face detection, pose estimation, and landmark localization in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>605</td><td>394</td><td>45</td><td>576</td><td>422</td></tr><tr><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td><td>kitti</td><td>KITTI</td><td><a href="papers/026e3363b7f76b51cc711886597a44d5f1fd1de2.html" target="_blank">Vision meets robotics: The KITTI dataset</a></td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td>I. J. 
Robotics Res.</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>547</td><td>452</td><td>37</td><td>553</td><td>462</td></tr><tr><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td><td>coco</td><td>COCO</td><td><a href="papers/5e0f8c355a37a5a89351c02f174e7a5ddcb98683.html" target="_blank">Microsoft COCO: Common Objects in Context</a></td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>545</td><td>454</td><td>26</td><td>722</td><td>259</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html" target="_blank">Labeled Faces in the Wild: A Database forStudying Face Recognition in Unconstrained Environments</a></td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>545</td><td>454</td><td>64</td><td>598</td><td>382</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html" target="_blank">Deep Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>999</td><td>544</td><td>455</td><td>58</td><td>558</td><td>429</td></tr><tr><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td><td>voc</td><td>VOC</td><td><a href="papers/0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a.html" target="_blank">The Pascal Visual Object Classes (VOC) Challenge</a></td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>999</td><td>544</td><td>454</td><td>30</td><td>557</td><td>422</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html" target="_blank">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td>edu</td><td>University of Pittsburgh</td><td>United States</td><td>40.44415295</td><td>-79.96243993</td><td>54%</td><td>999</td><td>543</td><td>456</td><td>58</td><td>470</td><td>518</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html" target="_blank">Attribute and simile classifiers for face verification</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>914</td><td>534</td><td>380</td><td>49</td><td>586</td><td>316</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html" target="_blank">80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern 
Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>999</td><td>529</td><td>470</td><td>89</td><td>644</td><td>337</td></tr><tr><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/6d96f946aaabc734af7fe3fc4454cf8547fcd5ed.html" target="_blank">The AR face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>518</td><td>481</td><td>56</td><td>454</td><td>530</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html" target="_blank">Overview of the face recognition grand challenge</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>NIST</td><td>United States</td><td>39.14004000</td><td>-77.21850600</td><td>52%</td><td>999</td><td>518</td><td>480</td><td>86</td><td>549</td><td>442</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html" target="_blank">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>515</td><td>484</td><td>66</td><td>496</td><td>462</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html" target="_blank">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>51%</td><td>999</td><td>510</td><td>489</td><td>71</td><td>526</td><td>466</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html" target="_blank">Histograms of oriented gradients for human detection</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>INRIA Rhone-Alps, Montbonnot, France</td><td>France</td><td>45.21788600</td><td>5.80736900</td><td>51%</td><td>999</td><td>510</td><td>489</td><td>44</td><td>419</td><td>509</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html" target="_blank">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>999</td><td>497</td><td>502</td><td>94</td><td>493</td><td>491</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html" target="_blank">Comprehensive Database for Facial Expression 
Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>999</td><td>492</td><td>507</td><td>69</td><td>539</td><td>439</td></tr><tr><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/b62628ac06bbac998a3ab825324a41a11bc3a988.html" target="_blank">XM2VTSDB : The extended M2VTS database</a></td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>864</td><td>462</td><td>402</td><td>40</td><td>493</td><td>404</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html" target="_blank">Deep Learning Face Attributes in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>50%</td><td>919</td><td>455</td><td>463</td><td>64</td><td>694</td><td>201</td></tr><tr><td>dc8b25e35a3acb812beb499844734081722319b4</td><td>feret</td><td>FERET</td><td><a href="papers/dc8b25e35a3acb812beb499844734081722319b4.html" target="_blank">The FERET database and evaluation procedure for face-recognition algorithms</a></td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>999</td><td>454</td><td>545</td><td>103</td><td>589</td><td>421</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html" target="_blank">Parameterisation of a stochastic model for human face identification</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>999</td><td>451</td><td>548</td><td>94</td><td>543</td><td>427</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html" target="_blank">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>899</td><td>443</td><td>456</td><td>51</td><td>431</td><td>451</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>760</td><td>408</td><td>351</td><td>50</td><td>404</td><td>345</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>760</td><td>408</td><td>351</td><td>50</td><td>404</td><td>345</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html" target="_blank">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>716</td><td>370</td><td>346</td><td>60</td><td>492</td><td>222</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html" target="_blank">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>624</td><td>352</td><td>272</td><td>35</td><td>342</td><td>276</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html" target="_blank">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>568</td><td>319</td><td>249</td><td>26</td><td>320</td><td>235</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html" target="_blank">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>632</td><td>313</td><td>317</td><td>50</td><td>358</td><td>264</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html" target="_blank">Face recognition in unconstrained videos with matched background similarity</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>509</td><td>290</td><td>218</td><td>28</td><td>294</td><td>216</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>545</td><td>287</td><td>257</td><td>40</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>545</td><td>287</td><td>257</td><td>40</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>545</td><td>287</td><td>257</td><td>40</td><td>330</td><td>218</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html" target="_blank">Learning Face Representation from Scratch</a></td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>476</td><td>276</td><td>200</td><td>26</td><td>290</td><td>182</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html" target="_blank">A 3D facial expression database for facial behavior research</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>588</td><td>275</td><td>312</td><td>45</td><td>306</td><td>282</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html" target="_blank">Scalable Person Re-identification: A Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>460</td><td>253</td><td>207</td><td>16</td><td>263</td><td>185</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html" target="_blank">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>511</td><td>247</td><td>264</td><td>51</td><td>329</td><td>182</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>407</td><td>238</td><td>169</td><td>18</td><td>252</td><td>153</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>407</td><td>238</td><td>169</td><td>18</td><td>252</td><td>153</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>437</td><td>224</td><td>212</td><td>24</td><td>228</td><td>203</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>437</td><td>224</td><td>212</td><td>24</td><td>228</td><td>203</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html" target="_blank">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>387</td><td>224</td><td>163</td><td>21</td><td>291</td><td>96</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html" target="_blank">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>386</td><td>220</td><td>166</td><td>25</td><td>204</td><td>180</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html" target="_blank">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>429</td><td>217</td><td>212</td><td>39</td><td>198</td><td>234</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html" target="_blank">Web-based database for facial expression analysis</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>464</td><td>217</td><td>247</td><td>45</td><td>282</td><td>188</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html" target="_blank">FDDB: A benchmark for face detection in unconstrained settings</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>380</td><td>214</td><td>166</td><td>22</td><td>202</td><td>164</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>369</td><td>207</td><td>161</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>369</td><td>207</td><td>161</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>369</td><td>207</td><td>161</td><td>32</td><td>237</td><td>131</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html" target="_blank">Interactive Facial Feature Localization</a></td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Facebook</td><td>United 
States</td><td>37.39367170</td><td>-122.08072620</td><td>57%</td><td>352</td><td>200</td><td>152</td><td>26</td><td>212</td><td>146</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html" target="_blank">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>323</td><td>198</td><td>125</td><td>27</td><td>208</td><td>120</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html" target="_blank">Presentation and validation of the Radboud Faces Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>487</td><td>192</td><td>295</td><td>39</td><td>342</td><td>144</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html" target="_blank">Robust Face Landmark Estimation under Occlusion</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>57%</td><td>325</td><td>185</td><td>140</td><td>19</td><td>194</td><td>133</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html" target="_blank">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>343</td><td>183</td><td>160</td><td>25</td><td>223</td><td>114</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html" target="_blank">Depth and Appearance for Mobile Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>55%</td><td>324</td><td>179</td><td>145</td><td>26</td><td>193</td><td>127</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html" target="_blank">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>318</td><td>177</td><td>141</td><td>35</td><td>211</td><td>107</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a 
href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>285</td><td>174</td><td>111</td><td>13</td><td>197</td><td>93</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>285</td><td>174</td><td>111</td><td>13</td><td>197</td><td>93</td></tr><tr><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td><td>mot</td><td>MOT</td><td><a href="papers/5981e6479c3fd4e31644db35d236bfb84ae46514.html" target="_blank">Learning to associate: HybridBoosted multi-target tracker for crowded scene</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of Southern California</td><td>United States</td><td>34.02241490</td><td>-118.28634407</td><td>53%</td><td>326</td><td>172</td><td>153</td><td>27</td><td>190</td><td>137</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>55%</td><td>311</td><td>172</td><td>139</td><td>35</td><td>208</td><td>105</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>55%</td><td>311</td><td>172</td><td>139</td><td>35</td><td>208</td><td>105</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html" target="_blank">Bosphorus Database for 3D Face Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>352</td><td>171</td><td>181</td><td>17</td><td>162</td><td>188</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html" target="_blank">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>280</td><td>167</td><td>113</td><td>12</td><td>139</td><td>137</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a 
href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html" target="_blank">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Brown University</td><td>United States</td><td>41.82686820</td><td>-71.40123146</td><td>58%</td><td>264</td><td>154</td><td>110</td><td>27</td><td>206</td><td>56</td></tr><tr><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/010f0f4929e6a6644fb01f0e43820f91d0fad292.html" target="_blank">YFCC100M: the new data in multimedia research</a></td><td><span class="gray">[pdf]</a></td><td>Commun. ACM</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>274</td><td>154</td><td>120</td><td>24</td><td>172</td><td>100</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>town_centre</td><td>TownCentre</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html" target="_blank">Stable multi-target tracking in real-time surveillance video</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>328</td><td>153</td><td>175</td><td>24</td><td>186</td><td>140</td></tr><tr><td>6204776d31359d129a582057c2d788a14f8aadeb</td><td>youtube_celebrities</td><td>YouTube Celebrities</td><td><a href="papers/6204776d31359d129a582057c2d788a14f8aadeb.html" target="_blank">Face tracking and recognition with visual constraints in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Rutgers University</td><td>United States</td><td>40.47913175</td><td>-74.43168868</td><td>53%</td><td>267</td><td>141</td><td>125</td><td>14</td><td>125</td><td>121</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html" target="_blank">Generic object recognition with boosting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>47%</td><td>293</td><td>138</td><td>155</td><td>16</td><td>195</td><td>97</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html" target="_blank">Recognition using visual phrases</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United States</td><td>40.11116745</td><td>-88.22587665</td><td>55%</td><td>246</td><td>135</td><td>111</td><td>18</td><td>170</td><td>68</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html" target="_blank">Locally Aligned Feature Transforms across Views</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>258</td><td>134</td><td>124</td><td>17</td><td>136</td><td>117</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>bpad</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html" target="_blank">Describing people: A poselet-based 
approach to attribute classification</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>230</td><td>128</td><td>102</td><td>14</td><td>163</td><td>66</td></tr><tr><td>0c91808994a250d7be332400a534a9291ca3b60e</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/0c91808994a250d7be332400a534a9291ca3b60e.html" target="_blank">Weak Hypotheses and Boosting for Generic Object Detection and Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>236</td><td>127</td><td>109</td><td>17</td><td>161</td><td>77</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html" target="_blank">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>225</td><td>126</td><td>99</td><td>17</td><td>146</td><td>77</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html" target="_blank">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>237</td><td>121</td><td>116</td><td>22</td><td>159</td><td>76</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>209</td><td>120</td><td>89</td><td>9</td><td>111</td><td>97</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>209</td><td>120</td><td>89</td><td>9</td><td>111</td><td>97</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html" target="_blank">Exploring Models and Data for Image Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>206</td><td>115</td><td>91</td><td>11</td><td>162</td><td>39</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html" target="_blank">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a 
href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>197</td><td>112</td><td>85</td><td>16</td><td>108</td><td>88</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html" target="_blank">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><span class="gray">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>Australia</td><td>-35.27769990</td><td>149.11852700</td><td>61%</td><td>181</td><td>111</td><td>70</td><td>8</td><td>87</td><td>97</td></tr><tr><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/570f37ed63142312e6ccdf00ecc376341ec72b9f.html" target="_blank">Social LSTM: Human Trajectory Prediction in Crowded Spaces</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>224</td><td>110</td><td>114</td><td>3</td><td>140</td><td>81</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html" target="_blank">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>184</td><td>107</td><td>77</td><td>14</td><td>120</td><td>67</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html" target="_blank">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>176</td><td>105</td><td>71</td><td>2</td><td>113</td><td>62</td></tr><tr><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/22ad2c8c0f4d6aa4328b38d894b814ec22579761.html" target="_blank">Clothing cosegmentation for recognizing people</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University Silicon Valley</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>59%</td><td>178</td><td>105</td><td>73</td><td>7</td><td>100</td><td>86</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html" target="_blank">WIDER FACE: A Face Detection Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>58%</td><td>178</td><td>104</td><td>74</td><td>13</td><td>112</td><td>66</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html" 
target="_blank">Learning effective human pose estimation from inaccurate annotation</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Leeds</td><td>United Kingdom</td><td>53.80387185</td><td>-1.55245712</td><td>61%</td><td>169</td><td>103</td><td>66</td><td>8</td><td>108</td><td>65</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw</td><td>LFW</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html" target="_blank">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>183</td><td>101</td><td>82</td><td>14</td><td>103</td><td>77</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html" target="_blank">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>148</td><td>98</td><td>50</td><td>7</td><td>105</td><td>43</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html" target="_blank">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><span class="gray">[pdf]</a></td><td>Face and Gesture 2011</td><td>edu</td><td>Carnegie Mellon University Silicon Valley</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>51%</td><td>189</td><td>97</td><td>92</td><td>22</td><td>108</td><td>78</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html" target="_blank">Appearance-based gaze estimation in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>149</td><td>95</td><td>54</td><td>3</td><td>94</td><td>54</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html" target="_blank">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>148</td><td>95</td><td>53</td><td>5</td><td>80</td><td>65</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html" target="_blank">Age and Gender Estimation of Unfiltered Faces</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>179</td><td>93</td><td>86</td><td>7</td><td>98</td><td>80</td></tr><tr><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td><td>scface</td><td>SCface</td><td><a href="papers/29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d.html" target="_blank">SCface – surveillance cameras face database</a></td><td><a 
href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td>Multimedia Tools and Applications</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>179</td><td>91</td><td>88</td><td>15</td><td>88</td><td>89</td></tr><tr><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td><td>disfa</td><td>DISFA</td><td><a href="papers/5a5f0287484f0d480fed1ce585dbf729586f0edc.html" target="_blank">DISFA: A Spontaneous Facial Action Intensity Database</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Affective Computing</td><td>edu</td><td>University of Denver</td><td>United States</td><td>39.67665410</td><td>-104.96220300</td><td>49%</td><td>184</td><td>90</td><td>94</td><td>19</td><td>96</td><td>89</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html" target="_blank">On a Large Sequence-Based Human Gait Database</a></td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>150</td><td>90</td><td>60</td><td>17</td><td>103</td><td>51</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>53%</td><td>169</td><td>89</td><td>80</td><td>6</td><td>113</td><td>54</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>53%</td><td>169</td><td>89</td><td>80</td><td>6</td><td>113</td><td>54</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html" target="_blank">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>180</td><td>87</td><td>93</td><td>16</td><td>120</td><td>59</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html" target="_blank">Multi-camera activity correlation analysis</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>61%</td><td>142</td><td>87</td><td>55</td><td>7</td><td>77</td><td>64</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html" target="_blank">MARS: A Video Benchmark for Large-Scale Person 
Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>168</td><td>84</td><td>84</td><td>5</td><td>97</td><td>69</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html" target="_blank">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>168</td><td>83</td><td>85</td><td>11</td><td>85</td><td>79</td></tr><tr><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td><td>fei</td><td>FEI</td><td><a href="papers/8b56e33f33e582f3e473dba573a16b598ed9bcdc.html" target="_blank">A new ranking method for principal components analysis and its application to face image analysis</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>169</td><td>80</td><td>89</td><td>6</td><td>69</td><td>102</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html" target="_blank">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>52%</td><td>145</td><td>76</td><td>69</td><td>11</td><td>93</td><td>51</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html" target="_blank">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>139</td><td>73</td><td>66</td><td>9</td><td>100</td><td>37</td></tr><tr><td>5a4df9bef1872865f0b619ac3aacc97f49e4a035</td><td>cuhk_train_station</td><td>CUHK Train Station Dataset</td><td><a href="papers/5a4df9bef1872865f0b619ac3aacc97f49e4a035.html" target="_blank">Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>51%</td><td>141</td><td>72</td><td>69</td><td>6</td><td>60</td><td>75</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html" target="_blank">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>United 
States</td><td>42.08779975</td><td>-75.97066066</td><td>46%</td><td>154</td><td>71</td><td>83</td><td>7</td><td>80</td><td>75</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html" target="_blank">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>138</td><td>71</td><td>67</td><td>6</td><td>76</td><td>63</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html" target="_blank">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>123</td><td>71</td><td>52</td><td>3</td><td>71</td><td>51</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html" target="_blank">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>116</td><td>69</td><td>47</td><td>14</td><td>84</td><td>31</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html" target="_blank">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>122</td><td>68</td><td>54</td><td>6</td><td>75</td><td>48</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html" target="_blank">A data-driven approach to cleaning large face datasets</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>138</td><td>66</td><td>72</td><td>5</td><td>95</td><td>41</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html" target="_blank">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>129</td><td>66</td><td>63</td><td>9</td><td>74</td><td>55</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html" target="_blank">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>133</td><td>64</td><td>69</td><td>11</td><td>73</td><td>58</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html" target="_blank">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>95</td><td>60</td><td>35</td><td>4</td><td>59</td><td>35</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>108</td><td>56</td><td>52</td><td>11</td><td>66</td><td>44</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>108</td><td>56</td><td>52</td><td>11</td><td>66</td><td>44</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html" target="_blank">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>95</td><td>56</td><td>39</td><td>7</td><td>50</td><td>45</td></tr><tr><td>06f02199690961ba52997cde1527e714d2b3bf8f</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/06f02199690961ba52997cde1527e714d2b3bf8f.html" target="_blank">Gaze locking: passive eye contact detection for human-object interaction</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>70%</td><td>79</td><td>55</td><td>24</td><td>0</td><td>49</td><td>34</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html" target="_blank">Understanding Kin Relationships in a Photo</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>94</td><td>55</td><td>39</td><td>1</td><td>33</td><td>61</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html" target="_blank">Pedestrian Attribute Recognition At Far Distance</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>88</td><td>55</td><td>33</td><td>1</td><td>50</td><td>36</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn 
Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html" target="_blank">Object Detection Combining Recognition and Segmentation</a></td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>105</td><td>53</td><td>52</td><td>9</td><td>58</td><td>43</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html" target="_blank">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>99</td><td>52</td><td>47</td><td>1</td><td>73</td><td>21</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html" target="_blank">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>115</td><td>49</td><td>66</td><td>8</td><td>75</td><td>37</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html" target="_blank">High Five: Recognising human interactions in TV shows</a></td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>98</td><td>49</td><td>49</td><td>10</td><td>66</td><td>28</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html" target="_blank">Violent flows: Real-time detection of violent crowd behavior</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>55%</td><td>88</td><td>48</td><td>40</td><td>6</td><td>45</td><td>44</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html" target="_blank">Automatic 3D face authentication</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>100</td><td>47</td><td>53</td><td>8</td><td>63</td><td>36</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html" target="_blank">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer 
Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>91</td><td>47</td><td>44</td><td>5</td><td>60</td><td>31</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html" target="_blank">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td>edu</td><td>University of North Carolina at Chapel Hill</td><td>United States</td><td>35.91139710</td><td>-79.05045290</td><td>56%</td><td>82</td><td>46</td><td>36</td><td>6</td><td>28</td><td>52</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html" target="_blank">Labeled Faces in the Wild: A Survey</a></td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>109</td><td>46</td><td>63</td><td>9</td><td>66</td><td>43</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html" target="_blank">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>78</td><td>45</td><td>33</td><td>6</td><td>54</td><td>23</td></tr><tr><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td><td>put_face</td><td>Put Face</td><td><a href="papers/ae0aee03d946efffdc7af2362a42d3750e7dd48a.html" target="_blank">The put face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>99</td><td>43</td><td>56</td><td>7</td><td>54</td><td>48</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html" target="_blank">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>51%</td><td>84</td><td>43</td><td>41</td><td>6</td><td>51</td><td>33</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html" target="_blank">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>60</td><td>42</td><td>18</td><td>0</td><td>34</td><td>28</td></tr><tr><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/7f23a4bb0c777dd72cca7665a5f370ac7980217e.html" target="_blank">Improving Person Re-identification by Attribute and Identity Learning</a></td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>87</td><td>42</td><td>45</td><td>0</td><td>43</td><td>42</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html" target="_blank">Pruning training sets for learning of object categories</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>63</td><td>40</td><td>23</td><td>5</td><td>42</td><td>20</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html" target="_blank">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>86</td><td>40</td><td>46</td><td>7</td><td>54</td><td>29</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html" target="_blank">Kinship Verification through Transfer Learning</a></td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>71</td><td>40</td><td>31</td><td>2</td><td>29</td><td>42</td></tr><tr><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/70c59dc3470ae867016f6ab0e008ac8ba03774a1.html" target="_blank">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>83</td><td>39</td><td>44</td><td>6</td><td>61</td><td>20</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html" target="_blank">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>83</td><td>37</td><td>46</td><td>6</td><td>43</td><td>39</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html" target="_blank">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>69%</td><td>54</td><td>37</td><td>16</td><td>1</td><td>41</td><td>12</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html" target="_blank">Ordinal Regression with 
Multiple Output CNN for Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>78</td><td>35</td><td>43</td><td>8</td><td>44</td><td>31</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html" target="_blank">Person Re-identification in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>77</td><td>35</td><td>42</td><td>2</td><td>47</td><td>27</td></tr><tr><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td><td>feret</td><td>FERET</td><td><a href="papers/31de9b3dd6106ce6eec9a35991b2b9083395fd0b.html" target="_blank">FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</a></td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>75</td><td>34</td><td>41</td><td>5</td><td>54</td><td>20</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>69%</td><td>49</td><td>34</td><td>15</td><td>1</td><td>19</td><td>29</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html" target="_blank">The MUG facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>Greece</td><td>40.62984145</td><td>22.95889350</td><td>41%</td><td>82</td><td>34</td><td>48</td><td>5</td><td>34</td><td>47</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>69%</td><td>49</td><td>34</td><td>15</td><td>1</td><td>19</td><td>29</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html" target="_blank">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>52</td><td>32</td><td>20</td><td>1</td><td>46</td><td>6</td></tr><tr><td>8be57cdad86fdf8c8290df4ca3149592f3c46dd3</td><td>m2vts</td><td>m2vts</td><td><a href="papers/8be57cdad86fdf8c8290df4ca3149592f3c46dd3.html" target="_blank">The M2VTS Multimodal Face 
Database (Release 1.00)</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>73</td><td>32</td><td>41</td><td>2</td><td>39</td><td>33</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>60%</td><td>53</td><td>32</td><td>21</td><td>0</td><td>19</td><td>31</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>60%</td><td>53</td><td>32</td><td>21</td><td>0</td><td>19</td><td>31</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html" target="_blank">Automated Human Identification Using Ear Imaging</a></td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>38%</td><td>80</td><td>30</td><td>50</td><td>6</td><td>35</td><td>44</td></tr><tr><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td><td>pa_100k</td><td>PA-100K</td><td><a href="papers/f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44.html" target="_blank">HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>55</td><td>30</td><td>25</td><td>0</td><td>36</td><td>17</td></tr><tr><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/18858cc936947fc96b5c06bbe3c6c2faa5614540.html" target="_blank">Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</a></td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>59</td><td>29</td><td>30</td><td>0</td><td>47</td><td>10</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html" target="_blank">Texas 3D Face Recognition Database</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>66</td><td>29</td><td>37</td><td>3</td><td>40</td><td>27</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html" target="_blank">UMB-DB: A database of partially occluded 3D 
faces</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>47</td><td>29</td><td>18</td><td>2</td><td>22</td><td>24</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html" target="_blank">Recognize complex events from static images by fusing deep channels</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>44</td><td>29</td><td>15</td><td>1</td><td>29</td><td>15</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html" target="_blank">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>67</td><td>28</td><td>39</td><td>4</td><td>29</td><td>28</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html" target="_blank">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>61</td><td>28</td><td>33</td><td>0</td><td>43</td><td>16</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html" target="_blank">The intrinsic memorability of face photographs.</a></td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td>Journal of experimental psychology. 
General</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>52</td><td>27</td><td>25</td><td>2</td><td>36</td><td>14</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>52%</td><td>52</td><td>27</td><td>25</td><td>3</td><td>38</td><td>13</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>52%</td><td>52</td><td>27</td><td>25</td><td>3</td><td>38</td><td>13</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html" target="_blank">Consistent Re-identification in a Camera Network</a></td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>49</td><td>26</td><td>23</td><td>5</td><td>34</td><td>13</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html" target="_blank">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><span class="gray">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>65</td><td>26</td><td>39</td><td>7</td><td>45</td><td>20</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html" target="_blank">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>50</td><td>25</td><td>25</td><td>5</td><td>31</td><td>18</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html" target="_blank">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>34</td><td>24</td><td>10</td><td>0</td><td>18</td><td>16</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html" target="_blank">End-to-End Deep Learning for Person Search</a></td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>46</td><td>23</td><td>23</td><td>1</td><td>27</td><td>16</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/758d7e1be64cc668c59ef33ba8882c8597406e53.html" target="_blank">AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</a></td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>37</td><td>22</td><td>15</td><td>0</td><td>25</td><td>11</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html" target="_blank">Re-identify people in wide area camera network</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Udine</td><td>Italy</td><td>46.08107230</td><td>13.21194740</td><td>37%</td><td>60</td><td>22</td><td>38</td><td>1</td><td>38</td><td>21</td></tr><tr><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/1bd1645a629f1b612960ab9bba276afd4cf7c666.html" target="_blank">End-to-End People Detection in Crowded Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>42</td><td>21</td><td>21</td><td>1</td><td>19</td><td>19</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html" target="_blank">The CMU Face In Action (FIA) Database</a></td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>54</td><td>21</td><td>33</td><td>5</td><td>40</td><td>16</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html" target="_blank">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td>edu</td><td>University of Notre Dame</td><td>United States</td><td>41.70456775</td><td>-86.23822026</td><td>60%</td><td>35</td><td>21</td><td>14</td><td>3</td><td>18</td><td>15</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html" target="_blank">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>42</td><td>20</td><td>22</td><td>0</td><td>17</td><td>26</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html" target="_blank">Personalizing Human Video Pose Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition 
(CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>36</td><td>20</td><td>16</td><td>2</td><td>30</td><td>8</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>28</td><td>19</td><td>9</td><td>0</td><td>13</td><td>15</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html" target="_blank">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>42</td><td>19</td><td>23</td><td>2</td><td>26</td><td>15</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html" target="_blank">Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>47</td><td>19</td><td>28</td><td>1</td><td>23</td><td>24</td></tr><tr><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/41976ebc8ab76d9a6861487c97cc7fcbe3b6015f.html" target="_blank">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>29</td><td>19</td><td>10</td><td>2</td><td>27</td><td>2</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>28</td><td>19</td><td>9</td><td>0</td><td>13</td><td>15</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html" target="_blank">UMDFaces: An annotated face dataset for training deep networks</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>42</td><td>19</td><td>23</td><td>5</td><td>30</td><td>11</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>33</td><td>18</td><td>15</td><td>1</td><td>23</td><td>11</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html" 
target="_blank">Fashion Landmark Detection in the Wild</a></td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>26</td><td>18</td><td>8</td><td>1</td><td>16</td><td>10</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>33</td><td>18</td><td>15</td><td>1</td><td>23</td><td>11</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html" target="_blank">Ear Recognition: More Than a Survey</a></td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" target="_blank">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>0</td><td>10</td><td>16</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html" target="_blank">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>1</td><td>16</td><td>11</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>#N/A</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html" target="_blank">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>37</td><td>17</td><td>20</td><td>3</td><td>30</td><td>7</td></tr><tr><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/d178cde92ab3dc0dd2ebee5a76a33d556c39448b.html" target="_blank">The jiku mobile video dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>National University of Singapore</td><td>Singapore</td><td>1.29620180</td><td>103.77689944</td><td>71%</td><td>24</td><td>17</td><td>7</td><td>0</td><td>6</td><td>19</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html" target="_blank">Genealogical face recognition based on UB KinFace database</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>United States</td><td>42.93362780</td><td>-78.88394479</td><td>55%</td><td>31</td><td>17</td><td>14</td><td>0</td><td>11</td><td>21</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html" target="_blank">Level Playing Field for Million Scale Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition 
(CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>39</td><td>17</td><td>22</td><td>2</td><td>29</td><td>9</td></tr><tr><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/a0cc5f73a37723a6dd465924143f1cb4976d0169.html" target="_blank">Person Transfer GAN to Bridge Domain Gap for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>24</td><td>17</td><td>7</td><td>1</td><td>20</td><td>4</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html" target="_blank">Describing Common Human Visual Actions in Images</a></td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>25</td><td>16</td><td>9</td><td>0</td><td>23</td><td>2</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>erce</td><td>ERCe</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>29</td><td>16</td><td>13</td><td>2</td><td>14</td><td>13</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>29</td><td>16</td><td>13</td><td>2</td><td>14</td><td>13</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html" target="_blank">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>26</td><td>15</td><td>11</td><td>2</td><td>21</td><td>6</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>33</td><td>15</td><td>18</td><td>4</td><td>29</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" 
target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>33</td><td>15</td><td>18</td><td>4</td><td>29</td><td>4</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html" target="_blank">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>34</td><td>15</td><td>19</td><td>3</td><td>21</td><td>12</td></tr><tr><td>d818568838433a6d6831adde49a58cef05e0c89f</td><td>agedb</td><td>AgeDB</td><td><a href="papers/d818568838433a6d6831adde49a58cef05e0c89f.html" target="_blank">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>18</td><td>14</td><td>4</td><td>0</td><td>14</td><td>3</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html" target="_blank">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>35</td><td>14</td><td>21</td><td>2</td><td>21</td><td>14</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html" target="_blank">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>81%</td><td>16</td><td>13</td><td>3</td><td>0</td><td>13</td><td>4</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html" target="_blank">Three-dimensional face recognition: an eigensurface approach</a></td><td><span class="gray">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. 
ICIP '04.</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>38</td><td>13</td><td>25</td><td>4</td><td>24</td><td>13</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html" target="_blank">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>26</td><td>12</td><td>14</td><td>0</td><td>16</td><td>10</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html" target="_blank">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>1</td><td>12</td><td>6</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html" target="_blank">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>0</td><td>14</td><td>4</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html" target="_blank">Fine-grained evaluation on face detection in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>17</td><td>11</td><td>6</td><td>0</td><td>12</td><td>5</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html" target="_blank">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>22</td><td>10</td><td>12</td><td>3</td><td>11</td><td>10</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html" target="_blank">IARPA Janus Benchmark-B Face Dataset</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>35</td><td>10</td><td>25</td><td>6</td><td>25</td><td>8</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html" target="_blank">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics 
(NCVPRIPG)</td><td>edu</td><td>BVBCET, Hubli, India</td><td>India</td><td>15.36883320</td><td>75.12137960</td><td>59%</td><td>17</td><td>10</td><td>7</td><td>0</td><td>11</td><td>5</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html" target="_blank">Learning Social Relation Traits from Face Images</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>23</td><td>10</td><td>13</td><td>4</td><td>16</td><td>7</td></tr><tr><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/64e0690dd176a93de9d4328f6e31fc4afe1e7536.html" target="_blank">Tracking Multiple People Online and in Real Time</a></td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>23</td><td>9</td><td>14</td><td>2</td><td>12</td><td>10</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html" target="_blank">Recognizing disguised faces</a></td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>29</td><td>9</td><td>20</td><td>0</td><td>18</td><td>10</td></tr><tr><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/a8d0b149c2eadaa02204d3e4356fbc8eccf3b315.html" target="_blank">Hi4D-ADSIP 3-D dynamic facial articulation database</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>1</td><td>4</td><td>11</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html" target="_blank">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>United States</td><td>34.22498270</td><td>-77.86907744</td><td>69%</td><td>13</td><td>9</td><td>4</td><td>0</td><td>6</td><td>8</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html" target="_blank">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>19</td><td>9</td><td>10</td><td>2</td><td>6</td><td>13</td></tr><tr><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td><td>umd_faces</td><td>UMD</td><td><a href="papers/71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6.html" target="_blank">The Do’s and Don’ts for CNN-Based Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision Workshops 
(ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>35%</td><td>26</td><td>9</td><td>17</td><td>3</td><td>16</td><td>8</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html" target="_blank">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>12</td><td>5</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html" target="_blank">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>38%</td><td>21</td><td>8</td><td>13</td><td>2</td><td>18</td><td>3</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html" target="_blank">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>29</td><td>8</td><td>21</td><td>3</td><td>18</td><td>9</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html" target="_blank">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><span class="gray">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>Canada</td><td>45.50397610</td><td>-73.57496870</td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>13</td><td>7</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html" target="_blank">How to Take a Good Selfie?</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>11</td><td>8</td><td>3</td><td>0</td><td>7</td><td>5</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html" target="_blank">YawDD: a yawning detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>15</td><td>8</td><td>7</td><td>1</td><td>2</td><td>13</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html" target="_blank">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine 
Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>35%</td><td>20</td><td>7</td><td>13</td><td>0</td><td>9</td><td>11</td></tr><tr><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2b926b3586399d028b46315d7d9fb9d879e4f79c.html" target="_blank">Multimodal 2D, 2.5D & 3D Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2006 International Conference on Image Processing</td><td>edu</td><td>Universidad Rey Juan Carlos, Spain</td><td>Spain</td><td>40.33586610</td><td>-3.87694320</td><td>50%</td><td>14</td><td>7</td><td>7</td><td>0</td><td>2</td><td>12</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html" target="_blank">Iranian Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</a></td><td>2007 International Conference on Machine Vision</td><td>edu</td><td>Islamic Azad University</td><td>Iran</td><td>34.84529990</td><td>48.55962120</td><td>30%</td><td>23</td><td>7</td><td>16</td><td>2</td><td>14</td><td>9</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html" target="_blank">VADANA: A dense dataset for facial image analysis</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>University of Delaware</td><td>United States</td><td>39.68103280</td><td>-75.75401840</td><td>47%</td><td>15</td><td>7</td><td>8</td><td>0</td><td>5</td><td>10</td></tr><tr><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td><td>caltech_crp</td><td>Caltech CRP</td><td><a href="papers/060820f110a72cbf02c14a6d1085bd6e1d994f6a.html" target="_blank">Fine-grained classification of pedestrians in video: Benchmark and state of the art</a></td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>35%</td><td>17</td><td>6</td><td>11</td><td>0</td><td>9</td><td>8</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html" target="_blank">ChaLearn looking at people: A review of events and resources</a></td><td><span class="gray">[pdf]</a></td><td>2017 International 
Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>13</td><td>6</td><td>7</td><td>1</td><td>8</td><td>4</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html" target="_blank">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>11</td><td>6</td><td>5</td><td>0</td><td>5</td><td>4</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html" target="_blank">Competitive affective gaming: winning with a smile</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>Portugal</td><td>38.66096400</td><td>-9.20581300</td><td>67%</td><td>9</td><td>6</td><td>3</td><td>0</td><td>5</td><td>4</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html" target="_blank">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>86%</td><td>7</td><td>6</td><td>1</td><td>1</td><td>5</td><td>2</td></tr><tr><td>633c851ebf625ad7abdda2324e9de093cf623141</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/633c851ebf625ad7abdda2324e9de093cf623141.html" target="_blank">Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</a></td><td><span class="gray">[pdf]</a></td><td>2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>10</td><td>5</td><td>5</td><td>0</td><td>8</td><td>3</td></tr><tr><td>4af89578ac237278be310f7660a408b03f12d603</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/4af89578ac237278be310f7660a408b03f12d603.html" target="_blank">Large-scale geo-facial image analysis</a></td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>83%</td><td>6</td><td>5</td><td>1</td><td>0</td><td>4</td><td>2</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html" target="_blank">USED: a large-scale social event detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Trento</td><td>Italy</td><td>46.06588360</td><td>11.11598940</td><td>71%</td><td>7</td><td>5</td><td>2</td><td>0</td><td>3</td><td>4</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html" target="_blank">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>6</td><td>3</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>ufi</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html" target="_blank">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>12</td><td>4</td><td>8</td><td>0</td><td>4</td><td>6</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html" target="_blank">A Multi-modal Graphical Model for Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>0</td><td>5</td><td>3</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>erce</td><td>ERCe</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html" target="_blank">Visual Kinship Recognition of Families in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>University of Massachusetts Dartmouth</td><td>United States</td><td>41.62772475</td><td>-71.00724501</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>2</td><td>3</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html" target="_blank">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>16</td><td>4</td><td>12</td><td>2</td><td>10</td><td>6</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html" target="_blank">Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>1</td><td>3</td><td>5</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html" target="_blank">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><span class="gray">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>14</td><td>4</td><td>10</td><td>2</td><td>12</td><td>1</td></tr><tr><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/2d45cfd838016a6e39f6b766ffe85acd649440c7.html" target="_blank">Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>1</td><td>5</td><td>3</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html" target="_blank">Spoofing faces using makeup: An investigative study</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>France</td><td>43.61581310</td><td>7.06838000</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>0</td><td>1</td><td>5</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html" target="_blank">ReSEED: social event dEtection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>1</td><td>5</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html" target="_blank">Large scale unconstrained open set face database</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems 
(BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>0</td><td>4</td><td>2</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html" target="_blank">Faces in Places: compound query retrieval</a></td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>3</td><td>2</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html" target="_blank">Large age-gap face verification by feature injection in deep networks</a></td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>4</td></tr><tr><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td><td>mafa</td><td>MAsked FAces</td><td><a href="papers/9cc8cf0c7d7fa7607659921b6ff657e17e135ecc.html" target="_blank">Detecting Masked Faces in the Wild with LLE-CNNs</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>5</td><td>3</td><td>2</td><td>1</td><td>4</td><td>1</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html" target="_blank">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>14</td><td>3</td><td>11</td><td>1</td><td>12</td><td>1</td></tr><tr><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td><td>uccs</td><td>UCCS</td><td><a href="papers/d4f1eb008eb80595bcfdac368e23ae9754e1e745.html" target="_blank">Unconstrained Face Detection and Open-Set Face Recognition Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics 
(IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>5</td><td>3</td><td>2</td><td>0</td><td>4</td><td>1</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html" target="_blank">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html" target="_blank">Investigating Open-World Person Re-identification Using a Drone</a></td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>1</td><td>5</td><td>2</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html" target="_blank">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461.html" target="_blank">A 3D Dynamic Database for Unconstrained Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html" target="_blank">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td><td>gfw</td><td>Grouping Face in the Wild</td><td><a href="papers/e58dd160a76349d46f881bd6ddbc2921f08d1050.html" target="_blank">Merge or Not? 
Learning to Group Faces via Imitation Learning</a></td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html" target="_blank">Indian Face Age Database: A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td><td>imdb_face</td><td>IMDb Face</td><td><a href="papers/9e31e77f9543ab42474ba4e9330676e18c242e72.html" target="_blank">The Devil of Face Recognition is in the Noise</a></td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>SenseTime</td><td>China</td><td>39.99300800</td><td>116.32988200</td><td>17%</td><td>6</td><td>1</td><td>5</td><td>0</td><td>4</td><td>1</td></tr><tr><td>4eab317b5ac436a949849ed286baa3de2a541eef</td><td>laofiw</td><td>LAOFIW</td><td><a href="papers/4eab317b5ac436a949849ed286baa3de2a541eef.html" target="_blank">Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</a></td><td><a href="https://arxiv.org/pdf/1809.02169.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td><td>megaage</td><td>MegaAge</td><td><a href="papers/c72a2ea819df9b0e8cd267eebcc6528b8741e03d.html" target="_blank">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td><td>mr2</td><td>MR2</td><td><a href="papers/578d4ad74818086bb64f182f72e2c8bd31e3d426.html" target="_blank">The MR2: A multi-racial, mega-resolution database of facial stimuli.</a></td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>14%</td><td>7</td><td>1</td><td>6</td><td>0</td><td>7</td><td>0</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html" target="_blank">Crowdsourcing facial expressions for affective-interaction</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html" target="_blank">PETS 2017: Dataset and Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops 
(CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>9</td><td>1</td><td>8</td><td>0</td><td>1</td><td>8</td></tr><tr><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/2306b2a8fba28539306052764a77a0d0f5d1236a.html" target="_blank">Surveillance Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html" target="_blank">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>3531332efe19be21e7401ba1f04570a142617236</td><td>ufdd</td><td>UFDD</td><td><a href="papers/3531332efe19be21e7401ba1f04570a142617236.html" target="_blank">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html" target="_blank">Re-identification of pedestrians with variable occlusion and scale</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>Kingston University</td><td>United Kingdom</td><td>51.42930860</td><td>-0.26840440</td><td>11%</td><td>9</td><td>1</td><td>8</td><td>2</td><td>5</td><td>4</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td>WLFDB</td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html" target="_blank">WLFDB : Weakly Labeled Face Databases</a></td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43.html" target="_blank">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>2</td></tr><tr><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/7b92d1e53cc87f7a4256695de590098a2f30261e.html" target="_blank">From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops 
(CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html" target="_blank">Pedestrian detection: A benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/15e1af79939dbf90790b03d8aa02477783fb1d0f.html" target="_blank">Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</a></td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>72a155c987816ae81c858fddbd6beab656d86220</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/72a155c987816ae81c858fddbd6beab656d86220.html" target="_blank">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html" target="_blank">Face swapping: automatically replacing faces in photographs</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/12ad3b5bbbf407f8e54ea692c07633d1a867c566.html" target="_blank">Object recognition using segmentation for feature detection</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 17th International Conference on Pattern Recognition, 2004. ICPR 2004.</td><td>edu</td><td>Inst. of Comput. Sci., Univ. 
of Leoben, Austria</td><td>Austria</td><td>47.38473720</td><td>15.09302010</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html" target="_blank">HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</a></td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td><td>ibm_dif</td><td>IBM Diversity in Faces</td><td><a href="papers/0ab7cff2ccda7269b73ff6efd9d37e1318f7db25.html" target="_blank">Facial Coding Scheme Reference 1 Craniofacial Distances</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html" target="_blank">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html" target="_blank">Understanding images of groups of people</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University Silicon Valley</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfpw</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html" target="_blank">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html" target="_blank">Component-Based Face Recognition with 3D Morphable Models</a></td><td><span class="gray">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html" target="_blank">Names and faces in the news</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td><td>scut_head</td><td>SCUT HEAD</td><td><a href="papers/d3200d49a19a4a4e4e9745ee39649b65d80c834b.html" target="_blank">Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</a></td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td>2018 24th International Conference on Pattern Recognition (ICPR)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>8990cdce3f917dad622e43e033db686b354d057c</td><td>tiny_faces</td><td>TinyFace</td><td><a href="papers/8990cdce3f917dad622e43e033db686b354d057c.html" target="_blank">Low-Resolution Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html" target="_blank">VQA: Visual Question Answering</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html" target="_blank">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Kentucky</td><td>United States</td><td>38.03337420</td><td>-84.50177580</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/36bccfb2ad847096bc76777e544f305813cd8f5b.html" target="_blank">WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr></table></body></html>
\ No newline at end of file
+<!doctype html><html><head><meta charset='utf-8'><title>Coverage</title><link rel='stylesheet' href='reports.css'></head><body><h2>Coverage</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Country</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html" target="_blank">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>607</td><td>392</td><td>56</td><td>628</td><td>362</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html" target="_blank">Face detection, pose estimation, and landmark localization in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>605</td><td>394</td><td>45</td><td>576</td><td>422</td></tr><tr><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td><td>kitti</td><td>KITTI</td><td><a href="papers/026e3363b7f76b51cc711886597a44d5f1fd1de2.html" target="_blank">Vision meets robotics: The KITTI dataset</a></td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td>I. J. 
Robotics Res.</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>547</td><td>452</td><td>37</td><td>553</td><td>462</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html" target="_blank">Labeled Faces in the Wild: A Database forStudying Face Recognition in Unconstrained Environments</a></td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>547</td><td>452</td><td>64</td><td>598</td><td>382</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html" target="_blank">Deep Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>546</td><td>453</td><td>58</td><td>558</td><td>429</td></tr><tr><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td><td>coco</td><td>COCO</td><td><a href="papers/5e0f8c355a37a5a89351c02f174e7a5ddcb98683.html" target="_blank">Microsoft COCO: Common Objects in Context</a></td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>545</td><td>454</td><td>26</td><td>722</td><td>259</td></tr><tr><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td><td>voc</td><td>VOC</td><td><a href="papers/0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a.html" target="_blank">The Pascal Visual Object Classes (VOC) Challenge</a></td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>999</td><td>544</td><td>454</td><td>30</td><td>557</td><td>422</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html" target="_blank">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td>edu</td><td>University of Pittsburgh</td><td>United States</td><td>40.44415295</td><td>-79.96243993</td><td>54%</td><td>999</td><td>543</td><td>456</td><td>58</td><td>470</td><td>518</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html" target="_blank">Attribute and simile classifiers for face verification</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>914</td><td>535</td><td>379</td><td>49</td><td>586</td><td>316</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html" target="_blank">80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern 
Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>999</td><td>529</td><td>470</td><td>89</td><td>644</td><td>337</td></tr><tr><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/6d96f946aaabc734af7fe3fc4454cf8547fcd5ed.html" target="_blank">The AR face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>519</td><td>480</td><td>56</td><td>455</td><td>530</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html" target="_blank">Overview of the face recognition grand challenge</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>NIST</td><td>United States</td><td>39.14004000</td><td>-77.21850600</td><td>52%</td><td>999</td><td>519</td><td>479</td><td>86</td><td>549</td><td>442</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html" target="_blank">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>516</td><td>483</td><td>66</td><td>497</td><td>462</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html" target="_blank">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>51%</td><td>999</td><td>510</td><td>489</td><td>71</td><td>526</td><td>466</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html" target="_blank">Histograms of oriented gradients for human detection</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>INRIA Rhone-Alps, Montbonnot, France</td><td>France</td><td>45.21788600</td><td>5.80736900</td><td>51%</td><td>999</td><td>510</td><td>489</td><td>44</td><td>419</td><td>509</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html" target="_blank">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>999</td><td>498</td><td>501</td><td>94</td><td>495</td><td>491</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html" target="_blank">Comprehensive Database for Facial Expression 
Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>999</td><td>492</td><td>507</td><td>69</td><td>539</td><td>439</td></tr><tr><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/b62628ac06bbac998a3ab825324a41a11bc3a988.html" target="_blank">XM2VTSDB : The extended M2VTS database</a></td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>864</td><td>463</td><td>401</td><td>40</td><td>493</td><td>404</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html" target="_blank">Deep Learning Face Attributes in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>50%</td><td>919</td><td>456</td><td>462</td><td>64</td><td>694</td><td>201</td></tr><tr><td>dc8b25e35a3acb812beb499844734081722319b4</td><td>feret</td><td>FERET</td><td><a href="papers/dc8b25e35a3acb812beb499844734081722319b4.html" target="_blank">The FERET database and evaluation procedure for face-recognition algorithms</a></td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>999</td><td>455</td><td>544</td><td>103</td><td>590</td><td>421</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html" target="_blank">Parameterisation of a stochastic model for human face identification</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>999</td><td>452</td><td>547</td><td>94</td><td>543</td><td>427</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html" target="_blank">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>899</td><td>443</td><td>456</td><td>51</td><td>431</td><td>451</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>760</td><td>409</td><td>350</td><td>50</td><td>404</td><td>345</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>760</td><td>409</td><td>350</td><td>50</td><td>404</td><td>345</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html" target="_blank">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>716</td><td>370</td><td>346</td><td>60</td><td>492</td><td>222</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html" target="_blank">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>624</td><td>352</td><td>272</td><td>35</td><td>342</td><td>276</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html" target="_blank">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>568</td><td>320</td><td>248</td><td>26</td><td>320</td><td>235</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html" target="_blank">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>632</td><td>313</td><td>317</td><td>50</td><td>358</td><td>264</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html" target="_blank">Face recognition in unconstrained videos with matched background similarity</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>509</td><td>291</td><td>217</td><td>28</td><td>294</td><td>216</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>545</td><td>287</td><td>257</td><td>40</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>545</td><td>287</td><td>257</td><td>40</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>545</td><td>287</td><td>257</td><td>40</td><td>330</td><td>218</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html" target="_blank">Learning Face Representation from Scratch</a></td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>476</td><td>277</td><td>199</td><td>26</td><td>290</td><td>182</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html" target="_blank">A 3D facial expression database for facial behavior research</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>588</td><td>275</td><td>312</td><td>45</td><td>306</td><td>282</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html" target="_blank">Scalable Person Re-identification: A Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>460</td><td>254</td><td>206</td><td>16</td><td>263</td><td>185</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html" target="_blank">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>511</td><td>247</td><td>264</td><td>51</td><td>329</td><td>182</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>407</td><td>239</td><td>168</td><td>18</td><td>252</td><td>153</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>407</td><td>239</td><td>168</td><td>18</td><td>252</td><td>153</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>437</td><td>225</td><td>211</td><td>24</td><td>228</td><td>203</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>437</td><td>225</td><td>211</td><td>24</td><td>228</td><td>203</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html" target="_blank">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>387</td><td>224</td><td>163</td><td>21</td><td>291</td><td>96</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html" target="_blank">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>386</td><td>220</td><td>166</td><td>25</td><td>204</td><td>180</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html" target="_blank">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>429</td><td>218</td><td>211</td><td>39</td><td>198</td><td>234</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html" target="_blank">Web-based database for facial expression analysis</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>464</td><td>217</td><td>247</td><td>45</td><td>282</td><td>188</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html" target="_blank">FDDB: A benchmark for face detection in unconstrained settings</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>380</td><td>214</td><td>166</td><td>22</td><td>202</td><td>164</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>369</td><td>207</td><td>161</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>369</td><td>207</td><td>161</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>369</td><td>207</td><td>161</td><td>32</td><td>237</td><td>131</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html" target="_blank">Interactive Facial Feature Localization</a></td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Facebook</td><td>United 
States</td><td>37.39367170</td><td>-122.08072620</td><td>57%</td><td>352</td><td>200</td><td>152</td><td>26</td><td>212</td><td>146</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html" target="_blank">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>323</td><td>198</td><td>125</td><td>27</td><td>208</td><td>120</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html" target="_blank">Presentation and validation of the Radboud Faces Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>487</td><td>192</td><td>295</td><td>39</td><td>342</td><td>144</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html" target="_blank">Robust Face Landmark Estimation under Occlusion</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>57%</td><td>325</td><td>185</td><td>140</td><td>19</td><td>194</td><td>133</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html" target="_blank">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>343</td><td>183</td><td>160</td><td>25</td><td>223</td><td>114</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html" target="_blank">Depth and Appearance for Mobile Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>55%</td><td>324</td><td>179</td><td>145</td><td>26</td><td>193</td><td>127</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html" target="_blank">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>318</td><td>177</td><td>141</td><td>35</td><td>211</td><td>107</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a 
href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>285</td><td>174</td><td>111</td><td>13</td><td>197</td><td>93</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>285</td><td>174</td><td>111</td><td>13</td><td>197</td><td>93</td></tr><tr><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td><td>mot</td><td>MOT</td><td><a href="papers/5981e6479c3fd4e31644db35d236bfb84ae46514.html" target="_blank">Learning to associate: HybridBoosted multi-target tracker for crowded scene</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of Southern California</td><td>United States</td><td>34.02241490</td><td>-118.28634407</td><td>53%</td><td>326</td><td>172</td><td>153</td><td>27</td><td>190</td><td>137</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>55%</td><td>311</td><td>172</td><td>139</td><td>35</td><td>208</td><td>105</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>55%</td><td>311</td><td>172</td><td>139</td><td>35</td><td>208</td><td>105</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html" target="_blank">Bosphorus Database for 3D Face Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>352</td><td>171</td><td>181</td><td>17</td><td>162</td><td>188</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html" target="_blank">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>280</td><td>167</td><td>113</td><td>12</td><td>139</td><td>137</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a 
href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html" target="_blank">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Brown University</td><td>United States</td><td>41.82686820</td><td>-71.40123146</td><td>58%</td><td>264</td><td>154</td><td>110</td><td>27</td><td>206</td><td>56</td></tr><tr><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/010f0f4929e6a6644fb01f0e43820f91d0fad292.html" target="_blank">YFCC100M: the new data in multimedia research</a></td><td><span class="gray">[pdf]</a></td><td>Commun. ACM</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>274</td><td>154</td><td>120</td><td>24</td><td>172</td><td>100</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>town_centre</td><td>TownCentre</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html" target="_blank">Stable multi-target tracking in real-time surveillance video</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>328</td><td>153</td><td>175</td><td>24</td><td>186</td><td>140</td></tr><tr><td>6204776d31359d129a582057c2d788a14f8aadeb</td><td>youtube_celebrities</td><td>YouTube Celebrities</td><td><a href="papers/6204776d31359d129a582057c2d788a14f8aadeb.html" target="_blank">Face tracking and recognition with visual constraints in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Rutgers University</td><td>United States</td><td>40.47913175</td><td>-74.43168868</td><td>53%</td><td>267</td><td>142</td><td>124</td><td>14</td><td>125</td><td>121</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html" target="_blank">Generic object recognition with boosting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>47%</td><td>293</td><td>138</td><td>155</td><td>16</td><td>195</td><td>97</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html" target="_blank">Recognition using visual phrases</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United States</td><td>40.11116745</td><td>-88.22587665</td><td>55%</td><td>246</td><td>135</td><td>111</td><td>18</td><td>170</td><td>68</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html" target="_blank">Locally Aligned Feature Transforms across Views</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>258</td><td>134</td><td>124</td><td>17</td><td>136</td><td>117</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>bpad</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html" target="_blank">Describing people: A poselet-based 
approach to attribute classification</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>230</td><td>128</td><td>102</td><td>14</td><td>163</td><td>66</td></tr><tr><td>0c91808994a250d7be332400a534a9291ca3b60e</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/0c91808994a250d7be332400a534a9291ca3b60e.html" target="_blank">Weak Hypotheses and Boosting for Generic Object Detection and Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>236</td><td>127</td><td>109</td><td>17</td><td>161</td><td>77</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html" target="_blank">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>225</td><td>126</td><td>99</td><td>17</td><td>146</td><td>77</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html" target="_blank">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>237</td><td>123</td><td>114</td><td>22</td><td>159</td><td>76</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>209</td><td>121</td><td>88</td><td>9</td><td>111</td><td>97</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>209</td><td>121</td><td>88</td><td>9</td><td>111</td><td>97</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html" target="_blank">Exploring Models and Data for Image Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>206</td><td>115</td><td>91</td><td>11</td><td>162</td><td>39</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html" target="_blank">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a 
href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>197</td><td>112</td><td>85</td><td>16</td><td>108</td><td>88</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html" target="_blank">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><span class="gray">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>Australia</td><td>-35.27769990</td><td>149.11852700</td><td>61%</td><td>181</td><td>111</td><td>70</td><td>8</td><td>87</td><td>97</td></tr><tr><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/570f37ed63142312e6ccdf00ecc376341ec72b9f.html" target="_blank">Social LSTM: Human Trajectory Prediction in Crowded Spaces</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>224</td><td>110</td><td>114</td><td>3</td><td>140</td><td>81</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html" target="_blank">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>184</td><td>107</td><td>77</td><td>14</td><td>120</td><td>67</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html" target="_blank">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>176</td><td>105</td><td>71</td><td>2</td><td>113</td><td>62</td></tr><tr><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/22ad2c8c0f4d6aa4328b38d894b814ec22579761.html" target="_blank">Clothing cosegmentation for recognizing people</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University Silicon Valley</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>59%</td><td>178</td><td>105</td><td>73</td><td>7</td><td>100</td><td>86</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html" target="_blank">WIDER FACE: A Face Detection Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>58%</td><td>178</td><td>104</td><td>74</td><td>13</td><td>112</td><td>66</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html" 
target="_blank">Learning effective human pose estimation from inaccurate annotation</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Leeds</td><td>United Kingdom</td><td>53.80387185</td><td>-1.55245712</td><td>61%</td><td>169</td><td>103</td><td>66</td><td>8</td><td>108</td><td>65</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw</td><td>LFW</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html" target="_blank">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>183</td><td>101</td><td>82</td><td>14</td><td>103</td><td>77</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html" target="_blank">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>148</td><td>98</td><td>50</td><td>7</td><td>105</td><td>43</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html" target="_blank">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><span class="gray">[pdf]</a></td><td>Face and Gesture 2011</td><td>edu</td><td>Carnegie Mellon University Silicon Valley</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>51%</td><td>189</td><td>97</td><td>92</td><td>22</td><td>108</td><td>78</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html" target="_blank">Appearance-based gaze estimation in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>149</td><td>95</td><td>54</td><td>3</td><td>94</td><td>54</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html" target="_blank">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>148</td><td>95</td><td>53</td><td>5</td><td>80</td><td>65</td></tr><tr><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td><td>scface</td><td>SCface</td><td><a href="papers/29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d.html" target="_blank">SCface – surveillance cameras face database</a></td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td>Multimedia Tools and Applications</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>179</td><td>94</td><td>85</td><td>15</td><td>88</td><td>89</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html" target="_blank">Age and Gender Estimation of 
Unfiltered Faces</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>179</td><td>93</td><td>86</td><td>7</td><td>98</td><td>80</td></tr><tr><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td><td>disfa</td><td>DISFA</td><td><a href="papers/5a5f0287484f0d480fed1ce585dbf729586f0edc.html" target="_blank">DISFA: A Spontaneous Facial Action Intensity Database</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Affective Computing</td><td>edu</td><td>University of Denver</td><td>United States</td><td>39.67665410</td><td>-104.96220300</td><td>49%</td><td>184</td><td>90</td><td>94</td><td>19</td><td>96</td><td>89</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html" target="_blank">On a Large Sequence-Based Human Gait Database</a></td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>150</td><td>90</td><td>60</td><td>17</td><td>103</td><td>51</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>53%</td><td>169</td><td>89</td><td>80</td><td>6</td><td>113</td><td>54</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>53%</td><td>169</td><td>89</td><td>80</td><td>6</td><td>113</td><td>54</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html" target="_blank">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>180</td><td>88</td><td>92</td><td>16</td><td>120</td><td>59</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html" target="_blank">Multi-camera activity correlation analysis</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>62%</td><td>142</td><td>88</td><td>54</td><td>7</td><td>77</td><td>64</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html" target="_blank">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a 
href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>168</td><td>84</td><td>84</td><td>5</td><td>97</td><td>69</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html" target="_blank">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>168</td><td>83</td><td>85</td><td>11</td><td>85</td><td>79</td></tr><tr><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td><td>fei</td><td>FEI</td><td><a href="papers/8b56e33f33e582f3e473dba573a16b598ed9bcdc.html" target="_blank">A new ranking method for principal components analysis and its application to face image analysis</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>169</td><td>80</td><td>89</td><td>6</td><td>69</td><td>102</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html" target="_blank">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>52%</td><td>145</td><td>76</td><td>69</td><td>11</td><td>93</td><td>51</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html" target="_blank">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>139</td><td>74</td><td>65</td><td>9</td><td>100</td><td>37</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html" target="_blank">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>138</td><td>72</td><td>66</td><td>6</td><td>76</td><td>63</td></tr><tr><td>5a4df9bef1872865f0b619ac3aacc97f49e4a035</td><td>cuhk_train_station</td><td>CUHK Train Station Dataset</td><td><a href="papers/5a4df9bef1872865f0b619ac3aacc97f49e4a035.html" target="_blank">Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>51%</td><td>141</td><td>72</td><td>69</td><td>6</td><td>60</td><td>75</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html" 
target="_blank">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>United States</td><td>42.08779975</td><td>-75.97066066</td><td>46%</td><td>154</td><td>71</td><td>83</td><td>7</td><td>80</td><td>75</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html" target="_blank">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>123</td><td>71</td><td>52</td><td>3</td><td>71</td><td>51</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html" target="_blank">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>116</td><td>69</td><td>47</td><td>14</td><td>84</td><td>31</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html" target="_blank">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>122</td><td>68</td><td>54</td><td>6</td><td>75</td><td>48</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html" target="_blank">A data-driven approach to cleaning large face datasets</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>138</td><td>67</td><td>71</td><td>5</td><td>95</td><td>41</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html" target="_blank">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>129</td><td>66</td><td>63</td><td>9</td><td>74</td><td>55</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html" target="_blank">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>133</td><td>65</td><td>68</td><td>11</td><td>73</td><td>58</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html" target="_blank">Hipster Wars: Discovering Elements of Fashion 
Styles</a></td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>95</td><td>60</td><td>35</td><td>4</td><td>59</td><td>35</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html" target="_blank">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>95</td><td>57</td><td>38</td><td>7</td><td>50</td><td>45</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>108</td><td>56</td><td>52</td><td>11</td><td>66</td><td>44</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>108</td><td>56</td><td>52</td><td>11</td><td>66</td><td>44</td></tr><tr><td>06f02199690961ba52997cde1527e714d2b3bf8f</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/06f02199690961ba52997cde1527e714d2b3bf8f.html" target="_blank">Gaze locking: passive eye contact detection for human-object interaction</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>70%</td><td>79</td><td>55</td><td>24</td><td>0</td><td>49</td><td>34</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html" target="_blank">Understanding Kin Relationships in a Photo</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>94</td><td>55</td><td>39</td><td>1</td><td>33</td><td>61</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html" target="_blank">Pedestrian Attribute Recognition At Far Distance</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>88</td><td>55</td><td>33</td><td>1</td><td>50</td><td>36</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html" target="_blank">Object Detection Combining Recognition and Segmentation</a></td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>105</td><td>53</td><td>52</td><td>9</td><td>58</td><td>43</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html" target="_blank">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>99</td><td>52</td><td>47</td><td>1</td><td>73</td><td>21</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html" target="_blank">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>115</td><td>49</td><td>66</td><td>8</td><td>75</td><td>37</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html" target="_blank">High Five: Recognising human interactions in TV shows</a></td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>98</td><td>49</td><td>49</td><td>10</td><td>66</td><td>28</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html" target="_blank">Violent flows: Real-time detection of violent crowd behavior</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>55%</td><td>88</td><td>48</td><td>40</td><td>6</td><td>45</td><td>44</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html" target="_blank">Automatic 3D face authentication</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>100</td><td>47</td><td>53</td><td>8</td><td>63</td><td>36</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html" target="_blank">Labeled Faces in the Wild: A Survey</a></td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>109</td><td>47</td><td>62</td><td>9</td><td>66</td><td>43</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html" target="_blank">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" 
target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>91</td><td>47</td><td>44</td><td>5</td><td>60</td><td>31</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html" target="_blank">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td>edu</td><td>University of North Carolina at Chapel Hill</td><td>United States</td><td>35.91139710</td><td>-79.05045290</td><td>56%</td><td>82</td><td>46</td><td>36</td><td>6</td><td>28</td><td>52</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html" target="_blank">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>78</td><td>45</td><td>33</td><td>6</td><td>54</td><td>23</td></tr><tr><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td><td>put_face</td><td>Put Face</td><td><a href="papers/ae0aee03d946efffdc7af2362a42d3750e7dd48a.html" target="_blank">The put face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>99</td><td>43</td><td>56</td><td>7</td><td>54</td><td>48</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html" target="_blank">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>51%</td><td>84</td><td>43</td><td>41</td><td>6</td><td>51</td><td>33</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html" target="_blank">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>60</td><td>42</td><td>18</td><td>0</td><td>34</td><td>28</td></tr><tr><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/7f23a4bb0c777dd72cca7665a5f370ac7980217e.html" target="_blank">Improving Person Re-identification by Attribute and Identity Learning</a></td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>87</td><td>42</td><td>45</td><td>0</td><td>43</td><td>42</td></tr><tr><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/70c59dc3470ae867016f6ab0e008ac8ba03774a1.html" target="_blank">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td>2018 13th IEEE International 
Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>83</td><td>41</td><td>42</td><td>6</td><td>61</td><td>20</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html" target="_blank">Pruning training sets for learning of object categories</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>63</td><td>40</td><td>23</td><td>5</td><td>42</td><td>20</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html" target="_blank">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>86</td><td>40</td><td>46</td><td>7</td><td>54</td><td>29</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html" target="_blank">Kinship Verification through Transfer Learning</a></td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>71</td><td>40</td><td>31</td><td>2</td><td>29</td><td>42</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html" target="_blank">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>83</td><td>37</td><td>46</td><td>6</td><td>43</td><td>39</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html" target="_blank">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>69%</td><td>54</td><td>37</td><td>16</td><td>1</td><td>41</td><td>12</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html" target="_blank">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>78</td><td>35</td><td>43</td><td>8</td><td>44</td><td>31</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html" target="_blank">Person Re-identification in the Wild</a></td><td><span 
class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>77</td><td>35</td><td>42</td><td>2</td><td>47</td><td>27</td></tr><tr><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td><td>feret</td><td>FERET</td><td><a href="papers/31de9b3dd6106ce6eec9a35991b2b9083395fd0b.html" target="_blank">FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</a></td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>75</td><td>34</td><td>41</td><td>5</td><td>54</td><td>20</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>69%</td><td>49</td><td>34</td><td>15</td><td>1</td><td>19</td><td>29</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html" target="_blank">The MUG facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>Greece</td><td>40.62984145</td><td>22.95889350</td><td>41%</td><td>82</td><td>34</td><td>48</td><td>5</td><td>34</td><td>47</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>69%</td><td>49</td><td>34</td><td>15</td><td>1</td><td>19</td><td>29</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html" target="_blank">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>52</td><td>32</td><td>20</td><td>1</td><td>46</td><td>6</td></tr><tr><td>8be57cdad86fdf8c8290df4ca3149592f3c46dd3</td><td>m2vts</td><td>m2vts</td><td><a href="papers/8be57cdad86fdf8c8290df4ca3149592f3c46dd3.html" target="_blank">The M2VTS Multimodal Face Database (Release 1.00)</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>73</td><td>32</td><td>41</td><td>2</td><td>39</td><td>33</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 
IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>60%</td><td>53</td><td>32</td><td>21</td><td>0</td><td>19</td><td>31</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>60%</td><td>53</td><td>32</td><td>21</td><td>0</td><td>19</td><td>31</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html" target="_blank">Automated Human Identification Using Ear Imaging</a></td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>38%</td><td>80</td><td>30</td><td>50</td><td>6</td><td>35</td><td>44</td></tr><tr><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td><td>pa_100k</td><td>PA-100K</td><td><a href="papers/f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44.html" target="_blank">HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>55</td><td>30</td><td>25</td><td>0</td><td>36</td><td>17</td></tr><tr><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/18858cc936947fc96b5c06bbe3c6c2faa5614540.html" target="_blank">Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</a></td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>59</td><td>29</td><td>30</td><td>0</td><td>47</td><td>10</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html" target="_blank">Texas 3D Face Recognition Database</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>66</td><td>29</td><td>37</td><td>3</td><td>40</td><td>27</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html" target="_blank">UMB-DB: A database of partially occluded 3D faces</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>47</td><td>29</td><td>18</td><td>2</td><td>22</td><td>24</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html" target="_blank">Recognize complex events from static images by fusing deep 
channels</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>44</td><td>29</td><td>15</td><td>1</td><td>29</td><td>15</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html" target="_blank">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>67</td><td>28</td><td>39</td><td>4</td><td>29</td><td>28</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html" target="_blank">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>61</td><td>28</td><td>33</td><td>0</td><td>43</td><td>16</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html" target="_blank">The intrinsic memorability of face photographs.</a></td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td>Journal of experimental psychology. 
General</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>52</td><td>27</td><td>25</td><td>2</td><td>36</td><td>14</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>52%</td><td>52</td><td>27</td><td>25</td><td>3</td><td>38</td><td>13</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>52%</td><td>52</td><td>27</td><td>25</td><td>3</td><td>38</td><td>13</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html" target="_blank">Consistent Re-identification in a Camera Network</a></td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>49</td><td>27</td><td>22</td><td>5</td><td>34</td><td>13</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html" target="_blank">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><span class="gray">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>65</td><td>26</td><td>39</td><td>7</td><td>45</td><td>20</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html" target="_blank">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>50</td><td>25</td><td>25</td><td>5</td><td>31</td><td>18</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html" target="_blank">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>34</td><td>24</td><td>10</td><td>0</td><td>18</td><td>16</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html" target="_blank">End-to-End Deep Learning for Person Search</a></td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>46</td><td>24</td><td>22</td><td>1</td><td>27</td><td>16</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html" target="_blank">Re-identify people in wide area camera network</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Udine</td><td>Italy</td><td>46.08107230</td><td>13.21194740</td><td>38%</td><td>60</td><td>23</td><td>37</td><td>1</td><td>38</td><td>21</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/758d7e1be64cc668c59ef33ba8882c8597406e53.html" target="_blank">AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</a></td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>37</td><td>22</td><td>15</td><td>0</td><td>25</td><td>11</td></tr><tr><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/1bd1645a629f1b612960ab9bba276afd4cf7c666.html" target="_blank">End-to-End People Detection in Crowded Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>42</td><td>21</td><td>21</td><td>1</td><td>19</td><td>19</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html" target="_blank">The CMU Face In Action (FIA) Database</a></td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>54</td><td>21</td><td>33</td><td>5</td><td>40</td><td>16</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html" target="_blank">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td>edu</td><td>University of Notre Dame</td><td>United States</td><td>41.70456775</td><td>-86.23822026</td><td>60%</td><td>35</td><td>21</td><td>14</td><td>3</td><td>18</td><td>15</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html" target="_blank">UMDFaces: An annotated face dataset for training deep networks</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>42</td><td>21</td><td>21</td><td>5</td><td>30</td><td>11</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html" target="_blank">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition 
(CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>42</td><td>20</td><td>22</td><td>0</td><td>17</td><td>26</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html" target="_blank">Personalizing Human Video Pose Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>36</td><td>20</td><td>16</td><td>2</td><td>30</td><td>8</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>28</td><td>19</td><td>9</td><td>0</td><td>13</td><td>15</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html" target="_blank">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>42</td><td>19</td><td>23</td><td>2</td><td>26</td><td>15</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html" target="_blank">Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>47</td><td>19</td><td>28</td><td>1</td><td>23</td><td>24</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html" target="_blank">Level Playing Field for Million Scale Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>39</td><td>19</td><td>20</td><td>2</td><td>29</td><td>9</td></tr><tr><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/41976ebc8ab76d9a6861487c97cc7fcbe3b6015f.html" target="_blank">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>29</td><td>19</td><td>10</td><td>2</td><td>27</td><td>2</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>28</td><td>19</td><td>9</td><td>0</td><td>13</td><td>15</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a 
href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>33</td><td>18</td><td>15</td><td>1</td><td>23</td><td>11</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html" target="_blank">Fashion Landmark Detection in the Wild</a></td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>26</td><td>18</td><td>8</td><td>1</td><td>16</td><td>10</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>33</td><td>18</td><td>15</td><td>1</td><td>23</td><td>11</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html" target="_blank">Ear Recognition: More Than a Survey</a></td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" target="_blank">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>0</td><td>10</td><td>16</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html" target="_blank">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>1</td><td>16</td><td>11</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>#N/A</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html" target="_blank">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>37</td><td>17</td><td>20</td><td>3</td><td>30</td><td>7</td></tr><tr><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/d178cde92ab3dc0dd2ebee5a76a33d556c39448b.html" target="_blank">The jiku mobile video dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>National University of Singapore</td><td>Singapore</td><td>1.29620180</td><td>103.77689944</td><td>71%</td><td>24</td><td>17</td><td>7</td><td>0</td><td>6</td><td>19</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html" target="_blank">Genealogical face recognition based on UB KinFace database</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 
WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>United States</td><td>42.93362780</td><td>-78.88394479</td><td>55%</td><td>31</td><td>17</td><td>14</td><td>0</td><td>11</td><td>21</td></tr><tr><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/a0cc5f73a37723a6dd465924143f1cb4976d0169.html" target="_blank">Person Transfer GAN to Bridge Domain Gap for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>24</td><td>17</td><td>7</td><td>1</td><td>20</td><td>4</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html" target="_blank">Describing Common Human Visual Actions in Images</a></td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>25</td><td>16</td><td>9</td><td>0</td><td>23</td><td>2</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>erce</td><td>ERCe</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>29</td><td>16</td><td>13</td><td>2</td><td>14</td><td>13</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html" target="_blank">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>34</td><td>16</td><td>18</td><td>3</td><td>21</td><td>12</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>29</td><td>16</td><td>13</td><td>2</td><td>14</td><td>13</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html" target="_blank">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>26</td><td>15</td><td>11</td><td>2</td><td>21</td><td>6</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" 
target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>33</td><td>15</td><td>18</td><td>4</td><td>29</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>33</td><td>15</td><td>18</td><td>4</td><td>29</td><td>4</td></tr><tr><td>d818568838433a6d6831adde49a58cef05e0c89f</td><td>agedb</td><td>AgeDB</td><td><a href="papers/d818568838433a6d6831adde49a58cef05e0c89f.html" target="_blank">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>18</td><td>14</td><td>4</td><td>0</td><td>14</td><td>3</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html" target="_blank">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>35</td><td>14</td><td>21</td><td>2</td><td>21</td><td>14</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html" target="_blank">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>81%</td><td>16</td><td>13</td><td>3</td><td>0</td><td>13</td><td>4</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html" target="_blank">Three-dimensional face recognition: an eigensurface approach</a></td><td><span class="gray">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. 
ICIP '04.</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>38</td><td>13</td><td>25</td><td>4</td><td>24</td><td>13</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html" target="_blank">IARPA Janus Benchmark-B Face Dataset</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>35</td><td>12</td><td>23</td><td>6</td><td>25</td><td>8</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html" target="_blank">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>26</td><td>12</td><td>14</td><td>0</td><td>16</td><td>10</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html" target="_blank">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>1</td><td>12</td><td>6</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html" target="_blank">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>0</td><td>14</td><td>4</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html" target="_blank">Fine-grained evaluation on face detection in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>17</td><td>11</td><td>6</td><td>0</td><td>12</td><td>5</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html" target="_blank">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>22</td><td>10</td><td>12</td><td>3</td><td>11</td><td>10</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html" target="_blank">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics 
(NCVPRIPG)</td><td>edu</td><td>BVBCET, Hubli, India</td><td>India</td><td>15.36883320</td><td>75.12137960</td><td>59%</td><td>17</td><td>10</td><td>7</td><td>0</td><td>11</td><td>5</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html" target="_blank">Learning Social Relation Traits from Face Images</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>23</td><td>10</td><td>13</td><td>4</td><td>16</td><td>7</td></tr><tr><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td><td>umd_faces</td><td>UMD</td><td><a href="papers/71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6.html" target="_blank">The Do’s and Don’ts for CNN-Based Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision Workshops (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>3</td><td>16</td><td>8</td></tr><tr><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/64e0690dd176a93de9d4328f6e31fc4afe1e7536.html" target="_blank">Tracking Multiple People Online and in Real Time</a></td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>23</td><td>9</td><td>14</td><td>2</td><td>12</td><td>10</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html" target="_blank">Recognizing disguised faces</a></td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>29</td><td>9</td><td>20</td><td>0</td><td>18</td><td>10</td></tr><tr><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/a8d0b149c2eadaa02204d3e4356fbc8eccf3b315.html" target="_blank">Hi4D-ADSIP 3-D dynamic facial articulation database</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>1</td><td>4</td><td>11</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html" target="_blank">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>United States</td><td>34.22498270</td><td>-77.86907744</td><td>69%</td><td>13</td><td>9</td><td>4</td><td>0</td><td>6</td><td>8</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html" target="_blank">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and 
Cybernetics</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>19</td><td>9</td><td>10</td><td>2</td><td>6</td><td>13</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html" target="_blank">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>12</td><td>5</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html" target="_blank">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>38%</td><td>21</td><td>8</td><td>13</td><td>2</td><td>18</td><td>3</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html" target="_blank">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>29</td><td>8</td><td>21</td><td>3</td><td>18</td><td>9</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html" target="_blank">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><span class="gray">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>Canada</td><td>45.50397610</td><td>-73.57496870</td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>13</td><td>7</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html" target="_blank">How to Take a Good Selfie?</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>11</td><td>8</td><td>3</td><td>0</td><td>7</td><td>5</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html" target="_blank">YawDD: a yawning detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>15</td><td>8</td><td>7</td><td>1</td><td>2</td><td>13</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html" target="_blank">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine 
Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>35%</td><td>20</td><td>7</td><td>13</td><td>0</td><td>9</td><td>11</td></tr><tr><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2b926b3586399d028b46315d7d9fb9d879e4f79c.html" target="_blank">Multimodal 2D, 2.5D & 3D Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2006 International Conference on Image Processing</td><td>edu</td><td>Universidad Rey Juan Carlos, Spain</td><td>Spain</td><td>40.33586610</td><td>-3.87694320</td><td>50%</td><td>14</td><td>7</td><td>7</td><td>0</td><td>2</td><td>12</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html" target="_blank">Iranian Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</a></td><td>2007 International Conference on Machine Vision</td><td>edu</td><td>Islamic Azad University</td><td>Iran</td><td>34.84529990</td><td>48.55962120</td><td>30%</td><td>23</td><td>7</td><td>16</td><td>2</td><td>14</td><td>9</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html" target="_blank">VADANA: A dense dataset for facial image analysis</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>University of Delaware</td><td>United States</td><td>39.68103280</td><td>-75.75401840</td><td>47%</td><td>15</td><td>7</td><td>8</td><td>0</td><td>5</td><td>10</td></tr><tr><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td><td>caltech_crp</td><td>Caltech CRP</td><td><a href="papers/060820f110a72cbf02c14a6d1085bd6e1d994f6a.html" target="_blank">Fine-grained classification of pedestrians in video: Benchmark and state of the art</a></td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>35%</td><td>17</td><td>6</td><td>11</td><td>0</td><td>9</td><td>8</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html" target="_blank">ChaLearn looking at people: A review of events and resources</a></td><td><span class="gray">[pdf]</a></td><td>2017 International 
Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>13</td><td>6</td><td>7</td><td>1</td><td>8</td><td>4</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html" target="_blank">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>11</td><td>6</td><td>5</td><td>0</td><td>5</td><td>4</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html" target="_blank">Competitive affective gaming: winning with a smile</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>Portugal</td><td>38.66096400</td><td>-9.20581300</td><td>67%</td><td>9</td><td>6</td><td>3</td><td>0</td><td>5</td><td>4</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html" target="_blank">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>86%</td><td>7</td><td>6</td><td>1</td><td>1</td><td>5</td><td>2</td></tr><tr><td>633c851ebf625ad7abdda2324e9de093cf623141</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/633c851ebf625ad7abdda2324e9de093cf623141.html" target="_blank">Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</a></td><td><span class="gray">[pdf]</a></td><td>2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>10</td><td>5</td><td>5</td><td>0</td><td>8</td><td>3</td></tr><tr><td>4af89578ac237278be310f7660a408b03f12d603</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/4af89578ac237278be310f7660a408b03f12d603.html" target="_blank">Large-scale geo-facial image analysis</a></td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>83%</td><td>6</td><td>5</td><td>1</td><td>0</td><td>4</td><td>2</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html" target="_blank">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><span class="gray">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>14</td><td>5</td><td>9</td><td>2</td><td>12</td><td>1</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html" target="_blank">Large scale unconstrained open set face database</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>83%</td><td>6</td><td>5</td><td>1</td><td>0</td><td>4</td><td>2</td></tr><tr><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td><td>uccs</td><td>UCCS</td><td><a href="papers/d4f1eb008eb80595bcfdac368e23ae9754e1e745.html" target="_blank">Unconstrained Face Detection and Open-Set Face Recognition Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>4</td><td>1</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html" target="_blank">USED: a large-scale social event detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Trento</td><td>Italy</td><td>46.06588360</td><td>11.11598940</td><td>71%</td><td>7</td><td>5</td><td>2</td><td>0</td><td>3</td><td>4</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html" target="_blank">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>6</td><td>3</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>ufi</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html" target="_blank">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>12</td><td>4</td><td>8</td><td>0</td><td>4</td><td>6</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html" target="_blank">A Multi-modal Graphical Model for Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>0</td><td>5</td><td>3</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>erce</td><td>ERCe</td><td><a 
href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html" target="_blank">Visual Kinship Recognition of Families in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>University of Massachusetts Dartmouth</td><td>United States</td><td>41.62772475</td><td>-71.00724501</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>2</td><td>3</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html" target="_blank">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>16</td><td>4</td><td>12</td><td>2</td><td>10</td><td>6</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html" target="_blank">Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>1</td><td>3</td><td>5</td></tr><tr><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/2d45cfd838016a6e39f6b766ffe85acd649440c7.html" target="_blank">Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>1</td><td>5</td><td>3</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html" target="_blank">Spoofing faces using makeup: An investigative study</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>France</td><td>43.61581310</td><td>7.06838000</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>0</td><td>1</td><td>5</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html" target="_blank">ReSEED: social event dEtection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>1</td><td>5</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>tisi</td><td>Times Square Intersection</td><td><a 
href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html" target="_blank">Faces in Places: compound query retrieval</a></td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>3</td><td>2</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html" target="_blank">Large age-gap face verification by feature injection in deep networks</a></td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>4</td></tr><tr><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td><td>mafa</td><td>MAsked FAces</td><td><a href="papers/9cc8cf0c7d7fa7607659921b6ff657e17e135ecc.html" target="_blank">Detecting Masked Faces in the Wild with LLE-CNNs</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>5</td><td>3</td><td>2</td><td>1</td><td>4</td><td>1</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html" target="_blank">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>14</td><td>3</td><td>11</td><td>1</td><td>12</td><td>1</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html" target="_blank">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html" target="_blank">Investigating Open-World Person Re-identification Using a Drone</a></td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>1</td><td>5</td><td>2</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html" target="_blank">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461.html" target="_blank">A 3D Dynamic Database for Unconstrained Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html" target="_blank">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td><td>gfw</td><td>Grouping Face in the Wild</td><td><a href="papers/e58dd160a76349d46f881bd6ddbc2921f08d1050.html" target="_blank">Merge or Not? 
Learning to Group Faces via Imitation Learning</a></td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html" target="_blank">Indian Face Age Database: A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td><td>imdb_face</td><td>IMDb Face</td><td><a href="papers/9e31e77f9543ab42474ba4e9330676e18c242e72.html" target="_blank">The Devil of Face Recognition is in the Noise</a></td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>SenseTime</td><td>China</td><td>39.99300800</td><td>116.32988200</td><td>17%</td><td>6</td><td>1</td><td>5</td><td>0</td><td>4</td><td>1</td></tr><tr><td>4eab317b5ac436a949849ed286baa3de2a541eef</td><td>laofiw</td><td>LAOFIW</td><td><a href="papers/4eab317b5ac436a949849ed286baa3de2a541eef.html" target="_blank">Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</a></td><td><a href="https://arxiv.org/pdf/1809.02169.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td><td>megaage</td><td>MegaAge</td><td><a href="papers/c72a2ea819df9b0e8cd267eebcc6528b8741e03d.html" target="_blank">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td><td>mr2</td><td>MR2</td><td><a href="papers/578d4ad74818086bb64f182f72e2c8bd31e3d426.html" target="_blank">The MR2: A multi-racial, mega-resolution database of facial stimuli.</a></td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>14%</td><td>7</td><td>1</td><td>6</td><td>0</td><td>7</td><td>0</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html" target="_blank">Crowdsourcing facial expressions for affective-interaction</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html" target="_blank">PETS 2017: Dataset and Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops 
(CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>9</td><td>1</td><td>8</td><td>0</td><td>1</td><td>8</td></tr><tr><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/2306b2a8fba28539306052764a77a0d0f5d1236a.html" target="_blank">Surveillance Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html" target="_blank">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>3531332efe19be21e7401ba1f04570a142617236</td><td>ufdd</td><td>UFDD</td><td><a href="papers/3531332efe19be21e7401ba1f04570a142617236.html" target="_blank">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html" target="_blank">Re-identification of pedestrians with variable occlusion and scale</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>Kingston University</td><td>United Kingdom</td><td>51.42930860</td><td>-0.26840440</td><td>11%</td><td>9</td><td>1</td><td>8</td><td>2</td><td>5</td><td>4</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td>WLFDB</td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html" target="_blank">WLFDB : Weakly Labeled Face Databases</a></td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43.html" target="_blank">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>2</td></tr><tr><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/7b92d1e53cc87f7a4256695de590098a2f30261e.html" target="_blank">From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops 
(CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html" target="_blank">Pedestrian detection: A benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/15e1af79939dbf90790b03d8aa02477783fb1d0f.html" target="_blank">Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</a></td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>72a155c987816ae81c858fddbd6beab656d86220</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/72a155c987816ae81c858fddbd6beab656d86220.html" target="_blank">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html" target="_blank">Face swapping: automatically replacing faces in photographs</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/12ad3b5bbbf407f8e54ea692c07633d1a867c566.html" target="_blank">Object recognition using segmentation for feature detection</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 17th International Conference on Pattern Recognition, 2004. ICPR 2004.</td><td>edu</td><td>Inst. of Comput. Sci., Univ. 
of Leoben, Austria</td><td>Austria</td><td>47.38473720</td><td>15.09302010</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html" target="_blank">HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</a></td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td><td>ibm_dif</td><td>IBM Diversity in Faces</td><td><a href="papers/0ab7cff2ccda7269b73ff6efd9d37e1318f7db25.html" target="_blank">Facial Coding Scheme Reference 1 Craniofacial Distances</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html" target="_blank">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html" target="_blank">Understanding images of groups of people</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University Silicon Valley</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfpw</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html" target="_blank">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html" target="_blank">Component-Based Face Recognition with 3D Morphable Models</a></td><td><span class="gray">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html" target="_blank">Names and faces in the news</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td><td>scut_head</td><td>SCUT HEAD</td><td><a href="papers/d3200d49a19a4a4e4e9745ee39649b65d80c834b.html" target="_blank">Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</a></td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td>2018 24th International Conference on Pattern Recognition (ICPR)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>8990cdce3f917dad622e43e033db686b354d057c</td><td>tiny_faces</td><td>TinyFace</td><td><a href="papers/8990cdce3f917dad622e43e033db686b354d057c.html" target="_blank">Low-Resolution Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html" target="_blank">VQA: Visual Question Answering</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html" target="_blank">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Kentucky</td><td>United States</td><td>38.03337420</td><td>-84.50177580</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/36bccfb2ad847096bc76777e544f305813cd8f5b.html" target="_blank">WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr></table></body></html>
\ No newline at end of file
diff --git a/scraper/reports/report_index.html b/scraper/reports/report_index.html
index b0bc3de7..c6b92593 100644
--- a/scraper/reports/report_index.html
+++ b/scraper/reports/report_index.html
@@ -1 +1 @@
-<!doctype html><html><head><meta charset='utf-8'><title>All Papers</title><link rel='stylesheet' href='reports.css'></head><body><h2>All Papers</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Country</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html" target="_blank">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>387</td><td>224</td><td>163</td><td>21</td><td>291</td><td>96</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html" target="_blank">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>129</td><td>66</td><td>63</td><td>9</td><td>74</td><td>55</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html" target="_blank">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>323</td><td>198</td><td>125</td><td>27</td><td>208</td><td>120</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html" target="_blank">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>133</td><td>64</td><td>69</td><td>11</td><td>73</td><td>58</td></tr><tr><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43.html" target="_blank">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>2</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html" target="_blank">80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine
Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>999</td><td>529</td><td>470</td><td>89</td><td>644</td><td>337</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html" target="_blank">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>42</td><td>19</td><td>23</td><td>2</td><td>26</td><td>15</td></tr><tr><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461.html" target="_blank">A 3D Dynamic Database for Unconstrained Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html" target="_blank">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>343</td><td>183</td><td>160</td><td>25</td><td>223</td><td>114</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html" target="_blank">A 3D facial expression database for facial behavior research</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>588</td><td>275</td><td>312</td><td>45</td><td>306</td><td>282</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html" target="_blank">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><span class="gray">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>65</td><td>26</td><td>39</td><td>7</td><td>45</td><td>20</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html" target="_blank">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>50</td><td>25</td><td>25</td><td>5</td><td>31</td><td>18</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html" target="_blank">A Multi-modal Graphical Model for Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer 
Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>0</td><td>5</td><td>3</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html" target="_blank">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>26</td><td>12</td><td>14</td><td>0</td><td>16</td><td>10</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html" target="_blank">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>184</td><td>107</td><td>77</td><td>14</td><td>120</td><td>67</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html" target="_blank">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>95</td><td>56</td><td>39</td><td>7</td><td>50</td><td>45</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html" target="_blank">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>35%</td><td>20</td><td>7</td><td>13</td><td>0</td><td>9</td><td>11</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html" target="_blank">A data-driven approach to cleaning large face datasets</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>138</td><td>66</td><td>72</td><td>5</td><td>95</td><td>41</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html" target="_blank">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>United States</td><td>42.08779975</td><td>-75.97066066</td><td>46%</td><td>154</td><td>71</td><td>83</td><td>7</td><td>80</td><td>75</td></tr><tr><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td><td>fei</td><td>FEI</td><td><a href="papers/8b56e33f33e582f3e473dba573a16b598ed9bcdc.html" target="_blank">A new ranking method for principal components analysis and its application to face image analysis</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision 
Comput.</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>169</td><td>80</td><td>89</td><td>6</td><td>69</td><td>102</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html" target="_blank">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>12</td><td>5</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html" target="_blank">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>999</td><td>497</td><td>502</td><td>94</td><td>493</td><td>491</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html" target="_blank">Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>47</td><td>19</td><td>28</td><td>1</td><td>23</td><td>24</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/758d7e1be64cc668c59ef33ba8882c8597406e53.html" target="_blank">AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</a></td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>37</td><td>22</td><td>15</td><td>0</td><td>25</td><td>11</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html" target="_blank">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>83</td><td>37</td><td>46</td><td>6</td><td>43</td><td>39</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html" target="_blank">Age and Gender Estimation of Unfiltered Faces</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>179</td><td>93</td><td>86</td><td>7</td><td>98</td><td>80</td></tr><tr><td>d818568838433a6d6831adde49a58cef05e0c89f</td><td>agedb</td><td>AgeDB</td><td><a href="papers/d818568838433a6d6831adde49a58cef05e0c89f.html" target="_blank">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops 
(CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>18</td><td>14</td><td>4</td><td>0</td><td>14</td><td>3</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html" target="_blank">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>318</td><td>177</td><td>141</td><td>35</td><td>211</td><td>107</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html" target="_blank">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>91</td><td>47</td><td>44</td><td>5</td><td>60</td><td>31</td></tr><tr><td>633c851ebf625ad7abdda2324e9de093cf623141</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/633c851ebf625ad7abdda2324e9de093cf623141.html" target="_blank">Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</a></td><td><span class="gray">[pdf]</a></td><td>2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>10</td><td>5</td><td>5</td><td>0</td><td>8</td><td>3</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html" target="_blank">Appearance-based gaze estimation in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>149</td><td>95</td><td>54</td><td>3</td><td>94</td><td>54</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html" target="_blank">Attribute and simile classifiers for face verification</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>914</td><td>534</td><td>380</td><td>49</td><td>586</td><td>316</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html" target="_blank">Automated Human Identification Using Ear Imaging</a></td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>38%</td><td>80</td><td>30</td><td>50</td><td>6</td><td>35</td><td>44</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html" target="_blank">Automatic 3D face authentication</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>Image Vision 
Comput.</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>100</td><td>47</td><td>53</td><td>8</td><td>63</td><td>36</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html" target="_blank">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>1</td><td>16</td><td>11</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>69%</td><td>49</td><td>34</td><td>15</td><td>1</td><td>19</td><td>29</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>69%</td><td>49</td><td>34</td><td>15</td><td>1</td><td>19</td><td>29</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html" target="_blank">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>69%</td><td>54</td><td>37</td><td>16</td><td>1</td><td>41</td><td>12</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html" target="_blank">Bosphorus Database for 3D Face Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>352</td><td>171</td><td>181</td><td>17</td><td>162</td><td>188</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>60%</td><td>53</td><td>32</td><td>21</td><td>0</td><td>19</td><td>31</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face 
recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>60%</td><td>53</td><td>32</td><td>21</td><td>0</td><td>19</td><td>31</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html" target="_blank">ChaLearn looking at people: A review of events and resources</a></td><td><span class="gray">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>13</td><td>6</td><td>7</td><td>1</td><td>8</td><td>4</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html" target="_blank">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>60</td><td>42</td><td>18</td><td>0</td><td>34</td><td>28</td></tr><tr><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/22ad2c8c0f4d6aa4328b38d894b814ec22579761.html" target="_blank">Clothing cosegmentation for recognizing people</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University Silicon Valley</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>59%</td><td>178</td><td>105</td><td>73</td><td>7</td><td>100</td><td>86</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>285</td><td>174</td><td>111</td><td>13</td><td>197</td><td>93</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>285</td><td>174</td><td>111</td><td>13</td><td>197</td><td>93</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html" target="_blank">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>899</td><td>443</td><td>456</td><td>51</td><td>431</td><td>451</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html" target="_blank">Collecting 
Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><span class="gray">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>Australia</td><td>-35.27769990</td><td>149.11852700</td><td>61%</td><td>181</td><td>111</td><td>70</td><td>8</td><td>87</td><td>97</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html" target="_blank">Competitive affective gaming: winning with a smile</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>Portugal</td><td>38.66096400</td><td>-9.20581300</td><td>67%</td><td>9</td><td>6</td><td>3</td><td>0</td><td>5</td><td>4</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html" target="_blank">Component-Based Face Recognition with 3D Morphable Models</a></td><td><span class="gray">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html" target="_blank">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>999</td><td>492</td><td>507</td><td>69</td><td>539</td><td>439</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html" target="_blank">Consistent Re-identification in a Camera Network</a></td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>49</td><td>26</td><td>23</td><td>5</td><td>34</td><td>13</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>33</td><td>18</td><td>15</td><td>1</td><td>23</td><td>11</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>33</td><td>18</td><td>15</td><td>1</td><td>23</td><td>11</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html" target="_blank">Crowdsourcing facial expressions for affective-interaction</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image 
Understanding</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html" target="_blank">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>122</td><td>68</td><td>54</td><td>6</td><td>75</td><td>48</td></tr><tr><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td><td>disfa</td><td>DISFA</td><td><a href="papers/5a5f0287484f0d480fed1ce585dbf729586f0edc.html" target="_blank">DISFA: A Spontaneous Facial Action Intensity Database</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Affective Computing</td><td>edu</td><td>University of Denver</td><td>United States</td><td>39.67665410</td><td>-104.96220300</td><td>49%</td><td>184</td><td>90</td><td>94</td><td>19</td><td>96</td><td>89</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html" target="_blank">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>52%</td><td>145</td><td>76</td><td>69</td><td>11</td><td>93</td><td>51</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html" target="_blank">Deep Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>999</td><td>544</td><td>455</td><td>58</td><td>558</td><td>429</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html" target="_blank">Deep Learning Face Attributes in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>50%</td><td>919</td><td>455</td><td>463</td><td>64</td><td>694</td><td>201</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html" target="_blank">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>176</td><td>105</td><td>71</td><td>2</td><td>113</td><td>62</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html" target="_blank">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE 
Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>568</td><td>319</td><td>249</td><td>26</td><td>320</td><td>235</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html" target="_blank">Depth and Appearance for Mobile Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>55%</td><td>324</td><td>179</td><td>145</td><td>26</td><td>193</td><td>127</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html" target="_blank">Describing Common Human Visual Actions in Images</a></td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>25</td><td>16</td><td>9</td><td>0</td><td>23</td><td>2</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>bpad</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html" target="_blank">Describing people: A poselet-based approach to attribute classification</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>230</td><td>128</td><td>102</td><td>14</td><td>163</td><td>66</td></tr><tr><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td><td>scut_head</td><td>SCUT HEAD</td><td><a href="papers/d3200d49a19a4a4e4e9745ee39649b65d80c834b.html" target="_blank">Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</a></td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td>2018 24th International Conference on Pattern Recognition (ICPR)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td><td>mafa</td><td>MAsked FAces</td><td><a href="papers/9cc8cf0c7d7fa7607659921b6ff657e17e135ecc.html" target="_blank">Detecting Masked Faces in the Wild with LLE-CNNs</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>5</td><td>3</td><td>2</td><td>1</td><td>4</td><td>1</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html" target="_blank">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>6</td><td>3</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html" target="_blank">Ear Recognition: More Than a Survey</a></td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" 
target="_blank">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>0</td><td>10</td><td>16</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw</td><td>LFW</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html" target="_blank">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>183</td><td>101</td><td>82</td><td>14</td><td>103</td><td>77</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html" target="_blank">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>86</td><td>40</td><td>46</td><td>7</td><td>54</td><td>29</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html" target="_blank">End-to-End Deep Learning for Person Search</a></td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>46</td><td>23</td><td>23</td><td>1</td><td>27</td><td>16</td></tr><tr><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/1bd1645a629f1b612960ab9bba276afd4cf7c666.html" target="_blank">End-to-End People Detection in Crowded Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>42</td><td>21</td><td>21</td><td>1</td><td>19</td><td>19</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html" target="_blank">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>624</td><td>352</td><td>272</td><td>35</td><td>342</td><td>276</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html" target="_blank">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>632</td><td>313</td><td>317</td><td>50</td><td>358</td><td>264</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html" target="_blank">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>14</td><td>3</td><td>11</td><td>1</td><td>12</td><td>1</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html" target="_blank">Exploring Models and Data for Image Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>206</td><td>115</td><td>91</td><td>11</td><td>162</td><td>39</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html" target="_blank">FDDB: A benchmark for face detection in unconstrained settings</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>380</td><td>214</td><td>166</td><td>22</td><td>202</td><td>164</td></tr><tr><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td><td>feret</td><td>FERET</td><td><a href="papers/31de9b3dd6106ce6eec9a35991b2b9083395fd0b.html" target="_blank">FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</a></td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>75</td><td>34</td><td>41</td><td>5</td><td>54</td><td>20</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html" target="_blank">Face detection, pose estimation, and landmark localization in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>605</td><td>394</td><td>45</td><td>576</td><td>422</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html" target="_blank">Face recognition in unconstrained videos with matched background similarity</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>509</td><td>290</td><td>218</td><td>28</td><td>294</td><td>216</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html" target="_blank">Face swapping: automatically replacing faces in photographs</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6204776d31359d129a582057c2d788a14f8aadeb</td><td>youtube_celebrities</td><td>YouTube Celebrities</td><td><a href="papers/6204776d31359d129a582057c2d788a14f8aadeb.html" target="_blank">Face tracking and recognition with visual constraints in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Rutgers University</td><td>United States</td><td>40.47913175</td><td>-74.43168868</td><td>53%</td><td>267</td><td>141</td><td>125</td><td>14</td><td>125</td><td>121</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html" target="_blank">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>225</td><td>126</td><td>99</td><td>17</td><td>146</td><td>77</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html" target="_blank">Faces in Places: compound query retrieval</a></td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>3</td><td>2</td></tr><tr><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td><td>ibm_dif</td><td>IBM Diversity in Faces</td><td><a href="papers/0ab7cff2ccda7269b73ff6efd9d37e1318f7db25.html" target="_blank">Facial Coding Scheme Reference 1 Craniofacial Distances</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>407</td><td>238</td><td>169</td><td>18</td><td>252</td><td>153</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>407</td><td>238</td><td>169</td><td>18</td><td>252</td><td>153</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html" target="_blank">Fashion Landmark Detection in the Wild</a></td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>26</td><td>18</td><td>8</td><td>1</td><td>16</td><td>10</td></tr><tr><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td><td>caltech_crp</td><td>Caltech CRP</td><td><a href="papers/060820f110a72cbf02c14a6d1085bd6e1d994f6a.html" target="_blank">Fine-grained classification of pedestrians in video: Benchmark and state of the art</a></td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>35%</td><td>17</td><td>6</td><td>11</td><td>0</td><td>9</td><td>8</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html" target="_blank">Fine-grained evaluation on face detection in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>17</td><td>11</td><td>6</td><td>0</td><td>12</td><td>5</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html" target="_blank">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>148</td><td>98</td><td>50</td><td>7</td><td>105</td><td>43</td></tr><tr><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/7b92d1e53cc87f7a4256695de590098a2f30261e.html" target="_blank">From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html" target="_blank">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>1</td><td>12</td><td>6</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html" target="_blank">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>11</td><td>6</td><td>5</td><td>0</td><td>5</td><td>4</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html" target="_blank">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>515</td><td>484</td><td>66</td><td>496</td><td>462</td></tr><tr><td>06f02199690961ba52997cde1527e714d2b3bf8f</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/06f02199690961ba52997cde1527e714d2b3bf8f.html" target="_blank">Gaze locking: passive eye contact detection for human-object interaction</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>70%</td><td>79</td><td>55</td><td>24</td><td>0</td><td>49</td><td>34</td></tr><tr><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/18858cc936947fc96b5c06bbe3c6c2faa5614540.html" target="_blank">Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</a></td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>59</td><td>29</td><td>30</td><td>0</td><td>47</td><td>10</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html" target="_blank">Genealogical face recognition based on UB KinFace database</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>United States</td><td>42.93362780</td><td>-78.88394479</td><td>55%</td><td>31</td><td>17</td><td>14</td><td>0</td><td>11</td><td>21</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html" target="_blank">Generic object recognition with boosting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>47%</td><td>293</td><td>138</td><td>155</td><td>16</td><td>195</td><td>97</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html" target="_blank">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html" target="_blank">HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</a></td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/a8d0b149c2eadaa02204d3e4356fbc8eccf3b315.html" target="_blank">Hi4D-ADSIP 3-D dynamic facial articulation database</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>1</td><td>4</td><td>11</td></tr><tr><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/2d45cfd838016a6e39f6b766ffe85acd649440c7.html" target="_blank">Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>1</td><td>5</td><td>3</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html" target="_blank">High Five: Recognising human interactions in TV shows</a></td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>98</td><td>49</td><td>49</td><td>10</td><td>66</td><td>28</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html" target="_blank">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>95</td><td>60</td><td>35</td><td>4</td><td>59</td><td>35</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html" target="_blank">Histograms of oriented gradients for human detection</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>INRIA Rhone-Alps, Montbonnot, France</td><td>France</td><td>45.21788600</td><td>5.80736900</td><td>51%</td><td>999</td><td>510</td><td>489</td><td>44</td><td>419</td><td>509</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html" target="_blank">How to Take a Good Selfie?</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>11</td><td>8</td><td>3</td><td>0</td><td>7</td><td>5</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html" target="_blank">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>0</td><td>14</td><td>4</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html" target="_blank">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>280</td><td>167</td><td>113</td><td>12</td><td>139</td><td>137</td></tr><tr><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td><td>pa_100k</td><td>PA-100K</td><td><a href="papers/f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44.html" target="_blank">HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>55</td><td>30</td><td>25</td><td>0</td><td>36</td><td>17</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html" target="_blank">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><span class="gray">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>14</td><td>4</td><td>10</td><td>2</td><td>12</td><td>1</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html" target="_blank">IARPA Janus Benchmark-B Face Dataset</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>35</td><td>10</td><td>25</td><td>6</td><td>25</td><td>8</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html" target="_blank">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>35</td><td>14</td><td>21</td><td>2</td><td>21</td><td>14</td></tr><tr><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/7f23a4bb0c777dd72cca7665a5f370ac7980217e.html" target="_blank">Improving Person Re-identification by Attribute and Identity Learning</a></td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>87</td><td>42</td><td>45</td><td>0</td><td>43</td><td>42</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html" target="_blank">Indian Face Age Database: A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html" target="_blank">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td>edu</td><td>BVBCET, Hubli, India</td><td>India</td><td>15.36883320</td><td>75.12137960</td><td>59%</td><td>17</td><td>10</td><td>7</td><td>0</td><td>11</td><td>5</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html" target="_blank">Interactive Facial Feature Localization</a></td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>57%</td><td>352</td><td>200</td><td>152</td><td>26</td><td>212</td><td>146</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html" target="_blank">Investigating Open-World Person Re-identification Using a Drone</a></td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>1</td><td>5</td><td>2</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html" target="_blank">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>United States</td><td>34.22498270</td><td>-77.86907744</td><td>69%</td><td>13</td><td>9</td><td>4</td><td>0</td><td>6</td><td>8</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html" target="_blank">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html" 
target="_blank">Iranian Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</a></td><td>2007 International Conference on Machine Vision</td><td>edu</td><td>Islamic Azad University</td><td>Iran</td><td>34.84529990</td><td>48.55962120</td><td>30%</td><td>23</td><td>7</td><td>16</td><td>2</td><td>14</td><td>9</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html" target="_blank">Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>1</td><td>3</td><td>5</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html" target="_blank">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td>edu</td><td>University of North Carolina at Chapel Hill</td><td>United States</td><td>35.91139710</td><td>-79.05045290</td><td>56%</td><td>82</td><td>46</td><td>36</td><td>6</td><td>28</td><td>52</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html" target="_blank">Kinship Verification through Transfer Learning</a></td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>71</td><td>40</td><td>31</td><td>2</td><td>29</td><td>42</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html" target="_blank">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>123</td><td>71</td><td>52</td><td>3</td><td>71</td><td>51</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html" target="_blank">Labeled Faces in the Wild: A Database forStudying Face Recognition in Unconstrained Environments</a></td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>545</td><td>454</td><td>64</td><td>598</td><td>382</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html" target="_blank">Labeled Faces in the Wild: A Survey</a></td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>109</td><td>46</td><td>63</td><td>9</td><td>66</td><td>43</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a 
href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html" target="_blank">Large age-gap face verification by feature injection in deep networks</a></td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>4</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html" target="_blank">Large scale unconstrained open set face database</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>0</td><td>4</td><td>2</td></tr><tr><td>4af89578ac237278be310f7660a408b03f12d603</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/4af89578ac237278be310f7660a408b03f12d603.html" target="_blank">Large-scale geo-facial image analysis</a></td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>83%</td><td>6</td><td>5</td><td>1</td><td>0</td><td>4</td><td>2</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>108</td><td>56</td><td>52</td><td>11</td><td>66</td><td>44</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>108</td><td>56</td><td>52</td><td>11</td><td>66</td><td>44</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html" target="_blank">Learning Face Representation from Scratch</a></td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>476</td><td>276</td><td>200</td><td>26</td><td>290</td><td>182</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html" target="_blank">Learning Social Relation Traits from Face Images</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>23</td><td>10</td><td>13</td><td>4</td><td>16</td><td>7</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html" target="_blank">Learning effective human pose estimation from inaccurate 
annotation</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Leeds</td><td>United Kingdom</td><td>53.80387185</td><td>-1.55245712</td><td>61%</td><td>169</td><td>103</td><td>66</td><td>8</td><td>108</td><td>65</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>erce</td><td>ERCe</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td><td>mot</td><td>MOT</td><td><a href="papers/5981e6479c3fd4e31644db35d236bfb84ae46514.html" target="_blank">Learning to associate: HybridBoosted multi-target tracker for crowded scene</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of Southern California</td><td>United States</td><td>34.02241490</td><td>-118.28634407</td><td>53%</td><td>326</td><td>172</td><td>153</td><td>27</td><td>190</td><td>137</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>369</td><td>207</td><td>161</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>369</td><td>207</td><td>161</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>369</td><td>207</td><td>161</td><td>32</td><td>237</td><td>131</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html" target="_blank">Level Playing Field for Million Scale Face Recognition</a></td><td><span 
class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>39</td><td>17</td><td>22</td><td>2</td><td>29</td><td>9</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html" target="_blank">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>197</td><td>112</td><td>85</td><td>16</td><td>108</td><td>88</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfpw</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html" target="_blank">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html" target="_blank">Locally Aligned Feature Transforms across Views</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>258</td><td>134</td><td>124</td><td>17</td><td>136</td><td>117</td></tr><tr><td>8990cdce3f917dad622e43e033db686b354d057c</td><td>tiny_faces</td><td>TinyFace</td><td><a href="papers/8990cdce3f917dad622e43e033db686b354d057c.html" target="_blank">Low-Resolution Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html" target="_blank">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>168</td><td>84</td><td>84</td><td>5</td><td>97</td><td>69</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>437</td><td>224</td><td>212</td><td>24</td><td>228</td><td>203</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition 
(FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>437</td><td>224</td><td>212</td><td>24</td><td>228</td><td>203</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html" target="_blank">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>180</td><td>87</td><td>93</td><td>16</td><td>120</td><td>59</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html" target="_blank">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>67</td><td>28</td><td>39</td><td>4</td><td>29</td><td>28</td></tr><tr><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td><td>gfw</td><td>Grouping Face in the Wild</td><td><a href="papers/e58dd160a76349d46f881bd6ddbc2921f08d1050.html" target="_blank">Merge or Not? Learning to Group Faces via Imitation Learning</a></td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html" target="_blank">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>81%</td><td>16</td><td>13</td><td>3</td><td>0</td><td>13</td><td>4</td></tr><tr><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td><td>coco</td><td>COCO</td><td><a href="papers/5e0f8c355a37a5a89351c02f174e7a5ddcb98683.html" target="_blank">Microsoft COCO: Common Objects in Context</a></td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>545</td><td>454</td><td>26</td><td>722</td><td>259</td></tr><tr><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/41976ebc8ab76d9a6861487c97cc7fcbe3b6015f.html" target="_blank">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>29</td><td>19</td><td>10</td><td>2</td><td>27</td><td>2</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern 
Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>55%</td><td>311</td><td>172</td><td>139</td><td>35</td><td>208</td><td>105</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>55%</td><td>311</td><td>172</td><td>139</td><td>35</td><td>208</td><td>105</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html" target="_blank">Multi-camera activity correlation analysis</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>61%</td><td>142</td><td>87</td><td>55</td><td>7</td><td>77</td><td>64</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html" target="_blank">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>148</td><td>95</td><td>53</td><td>5</td><td>80</td><td>65</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html" target="_blank">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>26</td><td>15</td><td>11</td><td>2</td><td>21</td><td>6</td></tr><tr><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2b926b3586399d028b46315d7d9fb9d879e4f79c.html" target="_blank">Multimodal 2D, 2.5D & 3D Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2006 International Conference on Image Processing</td><td>edu</td><td>Universidad Rey Juan Carlos, 
Spain</td><td>Spain</td><td>40.33586610</td><td>-3.87694320</td><td>50%</td><td>14</td><td>7</td><td>7</td><td>0</td><td>2</td><td>12</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html" target="_blank">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>42</td><td>20</td><td>22</td><td>0</td><td>17</td><td>26</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html" target="_blank">Names and faces in the news</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html" target="_blank">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>22</td><td>10</td><td>12</td><td>3</td><td>11</td><td>10</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html" target="_blank">Object Detection Combining Recognition and Segmentation</a></td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>105</td><td>53</td><td>52</td><td>9</td><td>58</td><td>43</td></tr><tr><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/12ad3b5bbbf407f8e54ea692c07633d1a867c566.html" target="_blank">Object recognition using segmentation for feature detection</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 17th International Conference on Pattern Recognition, 2004. ICPR 2004.</td><td>edu</td><td>Inst. of Comput. Sci., Univ. 
of Leoben, Austria</td><td>Austria</td><td>47.38473720</td><td>15.09302010</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html" target="_blank">On a Large Sequence-Based Human Gait Database</a></td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>150</td><td>90</td><td>60</td><td>17</td><td>103</td><td>51</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html" target="_blank">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>78</td><td>35</td><td>43</td><td>8</td><td>44</td><td>31</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html" target="_blank">Overview of the face recognition grand challenge</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>NIST</td><td>United States</td><td>39.14004000</td><td>-77.21850600</td><td>52%</td><td>999</td><td>518</td><td>480</td><td>86</td><td>549</td><td>442</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html" target="_blank">PETS 2017: Dataset and Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>9</td><td>1</td><td>8</td><td>0</td><td>1</td><td>8</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html" target="_blank">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><span class="gray">[pdf]</a></td><td>Face and Gesture 2011</td><td>edu</td><td>Carnegie Mellon University Silicon Valley</td><td>United 
States</td><td>37.41021930</td><td>-122.05965487</td><td>51%</td><td>189</td><td>97</td><td>92</td><td>22</td><td>108</td><td>78</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html" target="_blank">Parameterisation of a stochastic model for human face identification</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>999</td><td>451</td><td>548</td><td>94</td><td>543</td><td>427</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html" target="_blank">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>138</td><td>71</td><td>67</td><td>6</td><td>76</td><td>63</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>28</td><td>19</td><td>9</td><td>0</td><td>13</td><td>15</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>28</td><td>19</td><td>9</td><td>0</td><td>13</td><td>15</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html" target="_blank">Pedestrian Attribute Recognition At Far Distance</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>88</td><td>55</td><td>33</td><td>1</td><td>50</td><td>36</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html" target="_blank">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>51%</td><td>999</td><td>510</td><td>489</td><td>71</td><td>526</td><td>466</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html" target="_blank">Pedestrian detection: A benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a 
href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>545</td><td>287</td><td>257</td><td>40</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>545</td><td>287</td><td>257</td><td>40</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>545</td><td>287</td><td>257</td><td>40</td><td>330</td><td>218</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>53%</td><td>169</td><td>89</td><td>80</td><td>6</td><td>113</td><td>54</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>53%</td><td>169</td><td>89</td><td>80</td><td>6</td><td>113</td><td>54</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html" target="_blank">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>386</td><td>220</td><td>166</td><td>25</td><td>204</td><td>180</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>209</td><td>120</td><td>89</td><td>9</td><td>111</td><td>97</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a 
href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>209</td><td>120</td><td>89</td><td>9</td><td>111</td><td>97</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html" target="_blank">Person Re-identification in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>77</td><td>35</td><td>42</td><td>2</td><td>47</td><td>27</td></tr><tr><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/a0cc5f73a37723a6dd465924143f1cb4976d0169.html" target="_blank">Person Transfer GAN to Bridge Domain Gap for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>24</td><td>17</td><td>7</td><td>1</td><td>20</td><td>4</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html" target="_blank">Personalizing Human Video Pose Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>36</td><td>20</td><td>16</td><td>2</td><td>30</td><td>8</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html" target="_blank">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>716</td><td>370</td><td>346</td><td>60</td><td>492</td><td>222</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html" target="_blank">Presentation and validation of the Radboud Faces Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>487</td><td>192</td><td>295</td><td>39</td><td>342</td><td>144</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html" target="_blank">Pruning training sets for learning of object categories</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>63</td><td>40</td><td>23</td><td>5</td><td>42</td><td>20</td></tr><tr><td>3531332efe19be21e7401ba1f04570a142617236</td><td>ufdd</td><td>UFDD</td><td><a href="papers/3531332efe19be21e7401ba1f04570a142617236.html" target="_blank">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline 
Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html" target="_blank">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>237</td><td>121</td><td>116</td><td>22</td><td>159</td><td>76</td></tr><tr><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td><td>megaage</td><td>MegaAge</td><td><a href="papers/c72a2ea819df9b0e8cd267eebcc6528b8741e03d.html" target="_blank">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html" target="_blank">Re-identification of pedestrians with variable occlusion and scale</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>Kingston University</td><td>United Kingdom</td><td>51.42930860</td><td>-0.26840440</td><td>11%</td><td>9</td><td>1</td><td>8</td><td>2</td><td>5</td><td>4</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html" target="_blank">Re-identify people in wide area camera network</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Udine</td><td>Italy</td><td>46.08107230</td><td>13.21194740</td><td>37%</td><td>60</td><td>22</td><td>38</td><td>1</td><td>38</td><td>21</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html" target="_blank">ReSEED: social event dEtection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>1</td><td>5</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html" target="_blank">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html" target="_blank">Recognition using visual phrases</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United 
States</td><td>40.11116745</td><td>-88.22587665</td><td>55%</td><td>246</td><td>135</td><td>111</td><td>18</td><td>170</td><td>68</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html" target="_blank">Recognize complex events from static images by fusing deep channels</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>44</td><td>29</td><td>15</td><td>1</td><td>29</td><td>15</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html" target="_blank">Recognizing disguised faces</a></td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>29</td><td>9</td><td>20</td><td>0</td><td>18</td><td>10</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html" target="_blank">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>511</td><td>247</td><td>264</td><td>51</td><td>329</td><td>182</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html" target="_blank">Robust Face Landmark Estimation under Occlusion</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>57%</td><td>325</td><td>185</td><td>140</td><td>19</td><td>194</td><td>133</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html" target="_blank">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><span class="gray">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>Canada</td><td>45.50397610</td><td>-73.57496870</td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>13</td><td>7</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html" target="_blank">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>34</td><td>15</td><td>19</td><td>3</td><td>21</td><td>12</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html" target="_blank">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and 
Cybernetics</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>19</td><td>9</td><td>10</td><td>2</td><td>6</td><td>13</td></tr><tr><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td><td>scface</td><td>SCface</td><td><a href="papers/29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d.html" target="_blank">SCface – surveillance cameras face database</a></td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td>Multimedia Tools and Applications</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>179</td><td>91</td><td>88</td><td>15</td><td>88</td><td>89</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html" target="_blank">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html" target="_blank">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Brown University</td><td>United States</td><td>41.82686820</td><td>-71.40123146</td><td>58%</td><td>264</td><td>154</td><td>110</td><td>27</td><td>206</td><td>56</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html" target="_blank">Scalable Person Re-identification: A Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>460</td><td>253</td><td>207</td><td>16</td><td>263</td><td>185</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>52%</td><td>52</td><td>27</td><td>25</td><td>3</td><td>38</td><td>13</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>52%</td><td>52</td><td>27</td><td>25</td><td>3</td><td>38</td><td>13</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html" target="_blank">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and 
Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>52</td><td>32</td><td>20</td><td>1</td><td>46</td><td>6</td></tr><tr><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/570f37ed63142312e6ccdf00ecc376341ec72b9f.html" target="_blank">Social LSTM: Human Trajectory Prediction in Crowded Spaces</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>224</td><td>110</td><td>114</td><td>3</td><td>140</td><td>81</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html" target="_blank">Spoofing faces using makeup: An investigative study</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>France</td><td>43.61581310</td><td>7.06838000</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>0</td><td>1</td><td>5</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html" target="_blank">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>86%</td><td>7</td><td>6</td><td>1</td><td>1</td><td>5</td><td>2</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>town_centre</td><td>TownCentre</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html" target="_blank">Stable multi-target tracking in real-time surveillance video</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>328</td><td>153</td><td>175</td><td>24</td><td>186</td><td>140</td></tr><tr><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/2306b2a8fba28539306052764a77a0d0f5d1236a.html" target="_blank">Surveillance Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html" target="_blank">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html" target="_blank">Texas 3D Face Recognition Database</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation 
(SSIAI)</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>66</td><td>29</td><td>37</td><td>3</td><td>40</td><td>27</td></tr><tr><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/6d96f946aaabc734af7fe3fc4454cf8547fcd5ed.html" target="_blank">The AR face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>518</td><td>481</td><td>56</td><td>454</td><td>530</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html" target="_blank">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>429</td><td>217</td><td>212</td><td>39</td><td>198</td><td>234</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html" target="_blank">The CMU Face In Action (FIA) Database</a></td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>54</td><td>21</td><td>33</td><td>5</td><td>40</td><td>16</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>760</td><td>408</td><td>351</td><td>50</td><td>404</td><td>345</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>760</td><td>408</td><td>351</td><td>50</td><td>404</td><td>345</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html" target="_blank">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>99</td><td>52</td><td>47</td><td>1</td><td>73</td><td>21</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>#N/A</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html" target="_blank">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" 
target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>37</td><td>17</td><td>20</td><td>3</td><td>30</td><td>7</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html" target="_blank">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>38%</td><td>21</td><td>8</td><td>13</td><td>2</td><td>18</td><td>3</td></tr><tr><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td><td>imdb_face</td><td>IMDb Face</td><td><a href="papers/9e31e77f9543ab42474ba4e9330676e18c242e72.html" target="_blank">The Devil of Face Recognition is in the Noise</a></td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>SenseTime</td><td>China</td><td>39.99300800</td><td>116.32988200</td><td>17%</td><td>6</td><td>1</td><td>5</td><td>0</td><td>4</td><td>1</td></tr><tr><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td><td>umd_faces</td><td>UMD</td><td><a href="papers/71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6.html" target="_blank">The Do’s and Don’ts for CNN-Based Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision Workshops (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>35%</td><td>26</td><td>9</td><td>17</td><td>3</td><td>16</td><td>8</td></tr><tr><td>72a155c987816ae81c858fddbd6beab656d86220</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/72a155c987816ae81c858fddbd6beab656d86220.html" target="_blank">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html" target="_blank">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td>edu</td><td>University of Pittsburgh</td><td>United States</td><td>40.44415295</td><td>-79.96243993</td><td>54%</td><td>999</td><td>543</td><td>456</td><td>58</td><td>470</td><td>518</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html" target="_blank">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>29</td><td>8</td><td>21</td><td>3</td><td>18</td><td>9</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html" target="_blank">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a 
href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>115</td><td>49</td><td>66</td><td>8</td><td>75</td><td>37</td></tr><tr><td>dc8b25e35a3acb812beb499844734081722319b4</td><td>feret</td><td>FERET</td><td><a href="papers/dc8b25e35a3acb812beb499844734081722319b4.html" target="_blank">The FERET database and evaluation procedure for face-recognition algorithms</a></td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>999</td><td>454</td><td>545</td><td>103</td><td>589</td><td>421</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html" target="_blank">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>16</td><td>4</td><td>12</td><td>2</td><td>10</td><td>6</td></tr><tr><td>8be57cdad86fdf8c8290df4ca3149592f3c46dd3</td><td>m2vts</td><td>m2vts</td><td><a href="papers/8be57cdad86fdf8c8290df4ca3149592f3c46dd3.html" target="_blank">The M2VTS Multimodal Face Database (Release 1.00)</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>73</td><td>32</td><td>41</td><td>2</td><td>39</td><td>33</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>33</td><td>15</td><td>18</td><td>4</td><td>29</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>33</td><td>15</td><td>18</td><td>4</td><td>29</td><td>4</td></tr><tr><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td><td>mr2</td><td>MR2</td><td><a href="papers/578d4ad74818086bb64f182f72e2c8bd31e3d426.html" target="_blank">The MR2: A multi-racial, mega-resolution database of facial stimuli.</a></td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>14%</td><td>7</td><td>1</td><td>6</td><td>0</td><td>7</td><td>0</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html" target="_blank">The MUG facial expression 
database</a></td><td><span class="gray">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>Greece</td><td>40.62984145</td><td>22.95889350</td><td>41%</td><td>82</td><td>34</td><td>48</td><td>5</td><td>34</td><td>47</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html" target="_blank">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>61</td><td>28</td><td>33</td><td>0</td><td>43</td><td>16</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html" target="_blank">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>139</td><td>73</td><td>66</td><td>9</td><td>100</td><td>37</td></tr><tr><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td><td>voc</td><td>VOC</td><td><a href="papers/0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a.html" target="_blank">The Pascal Visual Object Classes (VOC) Challenge</a></td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>999</td><td>544</td><td>454</td><td>30</td><td>557</td><td>422</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html" target="_blank">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>116</td><td>69</td><td>47</td><td>14</td><td>84</td><td>31</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html" target="_blank">The intrinsic memorability of face photographs.</a></td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td>Journal of experimental psychology. 
General</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>52</td><td>27</td><td>25</td><td>2</td><td>36</td><td>14</td></tr><tr><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/d178cde92ab3dc0dd2ebee5a76a33d556c39448b.html" target="_blank">The jiku mobile video dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>National University of Singapore</td><td>Singapore</td><td>1.29620180</td><td>103.77689944</td><td>71%</td><td>24</td><td>17</td><td>7</td><td>0</td><td>6</td><td>19</td></tr><tr><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td><td>put_face</td><td>Put Face</td><td><a href="papers/ae0aee03d946efffdc7af2362a42d3750e7dd48a.html" target="_blank">The put face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>99</td><td>43</td><td>56</td><td>7</td><td>54</td><td>48</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html" target="_blank">Three-dimensional face recognition: an eigensurface approach</a></td><td><span class="gray">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. ICIP '04.</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>38</td><td>13</td><td>25</td><td>4</td><td>24</td><td>13</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html" target="_blank">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>51%</td><td>84</td><td>43</td><td>41</td><td>6</td><td>51</td><td>33</td></tr><tr><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/64e0690dd176a93de9d4328f6e31fc4afe1e7536.html" target="_blank">Tracking Multiple People Online and in Real Time</a></td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>23</td><td>9</td><td>14</td><td>2</td><td>12</td><td>10</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html" target="_blank">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>34</td><td>24</td><td>10</td><td>0</td><td>18</td><td>16</td></tr><tr><td>4eab317b5ac436a949849ed286baa3de2a541eef</td><td>laofiw</td><td>LAOFIW</td><td><a href="papers/4eab317b5ac436a949849ed286baa3de2a541eef.html" target="_blank">Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</a></td><td><a href="https://arxiv.org/pdf/1809.02169.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html" target="_blank">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>607</td><td>392</td><td>56</td><td>628</td><td>362</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html" target="_blank">UMB-DB: A database of partially occluded 3D faces</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>47</td><td>29</td><td>18</td><td>2</td><td>22</td><td>24</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html" target="_blank">UMDFaces: An annotated face dataset for training deep networks</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>42</td><td>19</td><td>23</td><td>5</td><td>30</td><td>11</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html" target="_blank">USED: a large-scale social event detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Trento</td><td>Italy</td><td>46.06588360</td><td>11.11598940</td><td>71%</td><td>7</td><td>5</td><td>2</td><td>0</td><td>3</td><td>4</td></tr><tr><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td><td>uccs</td><td>UCCS</td><td><a href="papers/d4f1eb008eb80595bcfdac368e23ae9754e1e745.html" target="_blank">Unconstrained Face Detection and Open-Set Face Recognition Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>5</td><td>3</td><td>2</td><td>0</td><td>4</td><td>1</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>ufi</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html" target="_blank">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>12</td><td>4</td><td>8</td><td>0</td><td>4</td><td>6</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html" target="_blank">Understanding Kin Relationships in a Photo</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>94</td><td>55</td><td>39</td><td>1</td><td>33</td><td>61</td></tr><tr><td>5a4df9bef1872865f0b619ac3aacc97f49e4a035</td><td>cuhk_train_station</td><td>CUHK Train Station Dataset</td><td><a 
href="papers/5a4df9bef1872865f0b619ac3aacc97f49e4a035.html" target="_blank">Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>51%</td><td>141</td><td>72</td><td>69</td><td>6</td><td>60</td><td>75</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html" target="_blank">Understanding images of groups of people</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University Silicon Valley</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/15e1af79939dbf90790b03d8aa02477783fb1d0f.html" target="_blank">Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</a></td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html" target="_blank">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td>edu</td><td>University of Notre Dame</td><td>United States</td><td>41.70456775</td><td>-86.23822026</td><td>60%</td><td>35</td><td>21</td><td>14</td><td>3</td><td>18</td><td>15</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html" target="_blank">VADANA: A dense dataset for facial image analysis</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>University of Delaware</td><td>United States</td><td>39.68103280</td><td>-75.75401840</td><td>47%</td><td>15</td><td>7</td><td>8</td><td>0</td><td>5</td><td>10</td></tr><tr><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/70c59dc3470ae867016f6ab0e008ac8ba03774a1.html" target="_blank">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>83</td><td>39</td><td>44</td><td>6</td><td>61</td><td>20</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html" target="_blank">VQA: Visual Question Answering</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>erce</td><td>ERCe</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>29</td><td>16</td><td>13</td><td>2</td><td>14</td><td>13</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>29</td><td>16</td><td>13</td><td>2</td><td>14</td><td>13</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html" target="_blank">Violent flows: Real-time detection of violent crowd behavior</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>55%</td><td>88</td><td>48</td><td>40</td><td>6</td><td>45</td><td>44</td></tr><tr><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td><td>kitti</td><td>KITTI</td><td><a href="papers/026e3363b7f76b51cc711886597a44d5f1fd1de2.html" target="_blank">Vision meets robotics: The KITTI dataset</a></td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td>I. J. 
Robotics Res.</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>547</td><td>452</td><td>37</td><td>553</td><td>462</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html" target="_blank">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>168</td><td>83</td><td>85</td><td>11</td><td>85</td><td>79</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html" target="_blank">Visual Kinship Recognition of Families in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>University of Massachusetts Dartmouth</td><td>United States</td><td>41.62772475</td><td>-71.00724501</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>2</td><td>3</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html" target="_blank">WIDER FACE: A Face Detection Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>58%</td><td>178</td><td>104</td><td>74</td><td>13</td><td>112</td><td>66</td></tr><tr><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/36bccfb2ad847096bc76777e544f305813cd8f5b.html" target="_blank">WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td>WLFDB</td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html" target="_blank">WLFDB : Weakly Labeled Face Databases</a></td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html" target="_blank">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>78</td><td>45</td><td>33</td><td>6</td><td>54</td><td>23</td></tr><tr><td>0c91808994a250d7be332400a534a9291ca3b60e</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/0c91808994a250d7be332400a534a9291ca3b60e.html" target="_blank">Weak Hypotheses and Boosting for Generic Object Detection and Recognition</a></td><td><a 
href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>236</td><td>127</td><td>109</td><td>17</td><td>161</td><td>77</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html" target="_blank">Web-based database for facial expression analysis</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>464</td><td>217</td><td>247</td><td>45</td><td>282</td><td>188</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html" target="_blank">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Kentucky</td><td>United States</td><td>38.03337420</td><td>-84.50177580</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/b62628ac06bbac998a3ab825324a41a11bc3a988.html" target="_blank">XM2VTSDB : The extended M2VTS database</a></td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>864</td><td>462</td><td>402</td><td>40</td><td>493</td><td>404</td></tr><tr><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/010f0f4929e6a6644fb01f0e43820f91d0fad292.html" target="_blank">YFCC100M: the new data in multimedia research</a></td><td><span class="gray">[pdf]</a></td><td>Commun. ACM</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>274</td><td>154</td><td>120</td><td>24</td><td>172</td><td>100</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html" target="_blank">YawDD: a yawning detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>15</td><td>8</td><td>7</td><td>1</td><td>2</td><td>13</td></tr></table></body></html>
\ No newline at end of file +<!doctype html><html><head><meta charset='utf-8'><title>All Papers</title><link rel='stylesheet' href='reports.css'></head><body><h2>All Papers</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Country</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html" target="_blank">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>387</td><td>224</td><td>163</td><td>21</td><td>291</td><td>96</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html" target="_blank">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>129</td><td>66</td><td>63</td><td>9</td><td>74</td><td>55</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html" target="_blank">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>323</td><td>198</td><td>125</td><td>27</td><td>208</td><td>120</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html" target="_blank">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>133</td><td>65</td><td>68</td><td>11</td><td>73</td><td>58</td></tr><tr><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43.html" target="_blank">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>2</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html" target="_blank">80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine 
Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>999</td><td>529</td><td>470</td><td>89</td><td>644</td><td>337</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html" target="_blank">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>42</td><td>19</td><td>23</td><td>2</td><td>26</td><td>15</td></tr><tr><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461.html" target="_blank">A 3D Dynamic Database for Unconstrained Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html" target="_blank">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>343</td><td>183</td><td>160</td><td>25</td><td>223</td><td>114</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html" target="_blank">A 3D facial expression database for facial behavior research</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>588</td><td>275</td><td>312</td><td>45</td><td>306</td><td>282</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html" target="_blank">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><span class="gray">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>65</td><td>26</td><td>39</td><td>7</td><td>45</td><td>20</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html" target="_blank">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>50</td><td>25</td><td>25</td><td>5</td><td>31</td><td>18</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html" target="_blank">A Multi-modal Graphical Model for Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer 
Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>0</td><td>5</td><td>3</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html" target="_blank">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>26</td><td>12</td><td>14</td><td>0</td><td>16</td><td>10</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html" target="_blank">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>184</td><td>107</td><td>77</td><td>14</td><td>120</td><td>67</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html" target="_blank">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>95</td><td>57</td><td>38</td><td>7</td><td>50</td><td>45</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html" target="_blank">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>35%</td><td>20</td><td>7</td><td>13</td><td>0</td><td>9</td><td>11</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html" target="_blank">A data-driven approach to cleaning large face datasets</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>138</td><td>67</td><td>71</td><td>5</td><td>95</td><td>41</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html" target="_blank">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>United States</td><td>42.08779975</td><td>-75.97066066</td><td>46%</td><td>154</td><td>71</td><td>83</td><td>7</td><td>80</td><td>75</td></tr><tr><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td><td>fei</td><td>FEI</td><td><a href="papers/8b56e33f33e582f3e473dba573a16b598ed9bcdc.html" target="_blank">A new ranking method for principal components analysis and its application to face image analysis</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision 
Comput.</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>169</td><td>80</td><td>89</td><td>6</td><td>69</td><td>102</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html" target="_blank">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>12</td><td>5</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html" target="_blank">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>999</td><td>498</td><td>501</td><td>94</td><td>495</td><td>491</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html" target="_blank">Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>47</td><td>19</td><td>28</td><td>1</td><td>23</td><td>24</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/758d7e1be64cc668c59ef33ba8882c8597406e53.html" target="_blank">AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</a></td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>37</td><td>22</td><td>15</td><td>0</td><td>25</td><td>11</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html" target="_blank">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>83</td><td>37</td><td>46</td><td>6</td><td>43</td><td>39</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html" target="_blank">Age and Gender Estimation of Unfiltered Faces</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>179</td><td>93</td><td>86</td><td>7</td><td>98</td><td>80</td></tr><tr><td>d818568838433a6d6831adde49a58cef05e0c89f</td><td>agedb</td><td>AgeDB</td><td><a href="papers/d818568838433a6d6831adde49a58cef05e0c89f.html" target="_blank">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops 
(CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>18</td><td>14</td><td>4</td><td>0</td><td>14</td><td>3</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html" target="_blank">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>318</td><td>177</td><td>141</td><td>35</td><td>211</td><td>107</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html" target="_blank">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>91</td><td>47</td><td>44</td><td>5</td><td>60</td><td>31</td></tr><tr><td>633c851ebf625ad7abdda2324e9de093cf623141</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/633c851ebf625ad7abdda2324e9de093cf623141.html" target="_blank">Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</a></td><td><span class="gray">[pdf]</a></td><td>2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>10</td><td>5</td><td>5</td><td>0</td><td>8</td><td>3</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html" target="_blank">Appearance-based gaze estimation in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>149</td><td>95</td><td>54</td><td>3</td><td>94</td><td>54</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html" target="_blank">Attribute and simile classifiers for face verification</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>914</td><td>535</td><td>379</td><td>49</td><td>586</td><td>316</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html" target="_blank">Automated Human Identification Using Ear Imaging</a></td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>38%</td><td>80</td><td>30</td><td>50</td><td>6</td><td>35</td><td>44</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html" target="_blank">Automatic 3D face authentication</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>Image Vision 
Comput.</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>100</td><td>47</td><td>53</td><td>8</td><td>63</td><td>36</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html" target="_blank">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>1</td><td>16</td><td>11</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>69%</td><td>49</td><td>34</td><td>15</td><td>1</td><td>19</td><td>29</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>69%</td><td>49</td><td>34</td><td>15</td><td>1</td><td>19</td><td>29</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html" target="_blank">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>69%</td><td>54</td><td>37</td><td>16</td><td>1</td><td>41</td><td>12</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html" target="_blank">Bosphorus Database for 3D Face Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>352</td><td>171</td><td>181</td><td>17</td><td>162</td><td>188</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>60%</td><td>53</td><td>32</td><td>21</td><td>0</td><td>19</td><td>31</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face 
recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>60%</td><td>53</td><td>32</td><td>21</td><td>0</td><td>19</td><td>31</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html" target="_blank">ChaLearn looking at people: A review of events and resources</a></td><td><span class="gray">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>13</td><td>6</td><td>7</td><td>1</td><td>8</td><td>4</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html" target="_blank">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>60</td><td>42</td><td>18</td><td>0</td><td>34</td><td>28</td></tr><tr><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/22ad2c8c0f4d6aa4328b38d894b814ec22579761.html" target="_blank">Clothing cosegmentation for recognizing people</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University Silicon Valley</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>59%</td><td>178</td><td>105</td><td>73</td><td>7</td><td>100</td><td>86</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>285</td><td>174</td><td>111</td><td>13</td><td>197</td><td>93</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>285</td><td>174</td><td>111</td><td>13</td><td>197</td><td>93</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html" target="_blank">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>899</td><td>443</td><td>456</td><td>51</td><td>431</td><td>451</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html" target="_blank">Collecting 
Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><span class="gray">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>Australia</td><td>-35.27769990</td><td>149.11852700</td><td>61%</td><td>181</td><td>111</td><td>70</td><td>8</td><td>87</td><td>97</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html" target="_blank">Competitive affective gaming: winning with a smile</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>Portugal</td><td>38.66096400</td><td>-9.20581300</td><td>67%</td><td>9</td><td>6</td><td>3</td><td>0</td><td>5</td><td>4</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html" target="_blank">Component-Based Face Recognition with 3D Morphable Models</a></td><td><span class="gray">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html" target="_blank">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>999</td><td>492</td><td>507</td><td>69</td><td>539</td><td>439</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html" target="_blank">Consistent Re-identification in a Camera Network</a></td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>49</td><td>27</td><td>22</td><td>5</td><td>34</td><td>13</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>33</td><td>18</td><td>15</td><td>1</td><td>23</td><td>11</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>33</td><td>18</td><td>15</td><td>1</td><td>23</td><td>11</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html" target="_blank">Crowdsourcing facial expressions for affective-interaction</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image 
Understanding</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html" target="_blank">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>122</td><td>68</td><td>54</td><td>6</td><td>75</td><td>48</td></tr><tr><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td><td>disfa</td><td>DISFA</td><td><a href="papers/5a5f0287484f0d480fed1ce585dbf729586f0edc.html" target="_blank">DISFA: A Spontaneous Facial Action Intensity Database</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Affective Computing</td><td>edu</td><td>University of Denver</td><td>United States</td><td>39.67665410</td><td>-104.96220300</td><td>49%</td><td>184</td><td>90</td><td>94</td><td>19</td><td>96</td><td>89</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html" target="_blank">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>52%</td><td>145</td><td>76</td><td>69</td><td>11</td><td>93</td><td>51</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html" target="_blank">Deep Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>546</td><td>453</td><td>58</td><td>558</td><td>429</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html" target="_blank">Deep Learning Face Attributes in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>50%</td><td>919</td><td>456</td><td>462</td><td>64</td><td>694</td><td>201</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html" target="_blank">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>176</td><td>105</td><td>71</td><td>2</td><td>113</td><td>62</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html" target="_blank">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE 
Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>568</td><td>320</td><td>248</td><td>26</td><td>320</td><td>235</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html" target="_blank">Depth and Appearance for Mobile Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>55%</td><td>324</td><td>179</td><td>145</td><td>26</td><td>193</td><td>127</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html" target="_blank">Describing Common Human Visual Actions in Images</a></td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>25</td><td>16</td><td>9</td><td>0</td><td>23</td><td>2</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>bpad</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html" target="_blank">Describing people: A poselet-based approach to attribute classification</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>230</td><td>128</td><td>102</td><td>14</td><td>163</td><td>66</td></tr><tr><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td><td>scut_head</td><td>SCUT HEAD</td><td><a href="papers/d3200d49a19a4a4e4e9745ee39649b65d80c834b.html" target="_blank">Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</a></td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td>2018 24th International Conference on Pattern Recognition (ICPR)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td><td>mafa</td><td>MAsked FAces</td><td><a href="papers/9cc8cf0c7d7fa7607659921b6ff657e17e135ecc.html" target="_blank">Detecting Masked Faces in the Wild with LLE-CNNs</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>5</td><td>3</td><td>2</td><td>1</td><td>4</td><td>1</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html" target="_blank">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>6</td><td>3</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html" target="_blank">Ear Recognition: More Than a Survey</a></td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" 
target="_blank">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>0</td><td>10</td><td>16</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw</td><td>LFW</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html" target="_blank">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>183</td><td>101</td><td>82</td><td>14</td><td>103</td><td>77</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html" target="_blank">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>86</td><td>40</td><td>46</td><td>7</td><td>54</td><td>29</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html" target="_blank">End-to-End Deep Learning for Person Search</a></td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>46</td><td>24</td><td>22</td><td>1</td><td>27</td><td>16</td></tr><tr><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/1bd1645a629f1b612960ab9bba276afd4cf7c666.html" target="_blank">End-to-End People Detection in Crowded Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>42</td><td>21</td><td>21</td><td>1</td><td>19</td><td>19</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html" target="_blank">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>624</td><td>352</td><td>272</td><td>35</td><td>342</td><td>276</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html" target="_blank">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>632</td><td>313</td><td>317</td><td>50</td><td>358</td><td>264</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html" target="_blank">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>14</td><td>3</td><td>11</td><td>1</td><td>12</td><td>1</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html" target="_blank">Exploring Models and Data for Image Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>206</td><td>115</td><td>91</td><td>11</td><td>162</td><td>39</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html" target="_blank">FDDB: A benchmark for face detection in unconstrained settings</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>380</td><td>214</td><td>166</td><td>22</td><td>202</td><td>164</td></tr><tr><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td><td>feret</td><td>FERET</td><td><a href="papers/31de9b3dd6106ce6eec9a35991b2b9083395fd0b.html" target="_blank">FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</a></td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>75</td><td>34</td><td>41</td><td>5</td><td>54</td><td>20</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html" target="_blank">Face detection, pose estimation, and landmark localization in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>605</td><td>394</td><td>45</td><td>576</td><td>422</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html" target="_blank">Face recognition in unconstrained videos with matched background similarity</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>509</td><td>291</td><td>217</td><td>28</td><td>294</td><td>216</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html" target="_blank">Face swapping: automatically replacing faces in photographs</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6204776d31359d129a582057c2d788a14f8aadeb</td><td>youtube_celebrities</td><td>YouTube Celebrities</td><td><a href="papers/6204776d31359d129a582057c2d788a14f8aadeb.html" target="_blank">Face tracking and recognition with visual constraints in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Rutgers University</td><td>United States</td><td>40.47913175</td><td>-74.43168868</td><td>53%</td><td>267</td><td>142</td><td>124</td><td>14</td><td>125</td><td>121</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html" target="_blank">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>225</td><td>126</td><td>99</td><td>17</td><td>146</td><td>77</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html" target="_blank">Faces in Places: compound query retrieval</a></td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>3</td><td>2</td></tr><tr><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td><td>ibm_dif</td><td>IBM Diversity in Faces</td><td><a href="papers/0ab7cff2ccda7269b73ff6efd9d37e1318f7db25.html" target="_blank">Facial Coding Scheme Reference 1 Craniofacial Distances</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>407</td><td>239</td><td>168</td><td>18</td><td>252</td><td>153</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>407</td><td>239</td><td>168</td><td>18</td><td>252</td><td>153</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html" target="_blank">Fashion Landmark Detection in the Wild</a></td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>26</td><td>18</td><td>8</td><td>1</td><td>16</td><td>10</td></tr><tr><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td><td>caltech_crp</td><td>Caltech CRP</td><td><a href="papers/060820f110a72cbf02c14a6d1085bd6e1d994f6a.html" target="_blank">Fine-grained classification of pedestrians in video: Benchmark and state of the art</a></td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>35%</td><td>17</td><td>6</td><td>11</td><td>0</td><td>9</td><td>8</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html" target="_blank">Fine-grained evaluation on face detection in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>17</td><td>11</td><td>6</td><td>0</td><td>12</td><td>5</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html" target="_blank">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>148</td><td>98</td><td>50</td><td>7</td><td>105</td><td>43</td></tr><tr><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/7b92d1e53cc87f7a4256695de590098a2f30261e.html" target="_blank">From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html" target="_blank">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>1</td><td>12</td><td>6</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html" target="_blank">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>11</td><td>6</td><td>5</td><td>0</td><td>5</td><td>4</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html" target="_blank">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>516</td><td>483</td><td>66</td><td>497</td><td>462</td></tr><tr><td>06f02199690961ba52997cde1527e714d2b3bf8f</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/06f02199690961ba52997cde1527e714d2b3bf8f.html" target="_blank">Gaze locking: passive eye contact detection for human-object interaction</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>70%</td><td>79</td><td>55</td><td>24</td><td>0</td><td>49</td><td>34</td></tr><tr><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/18858cc936947fc96b5c06bbe3c6c2faa5614540.html" target="_blank">Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</a></td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>59</td><td>29</td><td>30</td><td>0</td><td>47</td><td>10</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html" target="_blank">Genealogical face recognition based on UB KinFace database</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>United States</td><td>42.93362780</td><td>-78.88394479</td><td>55%</td><td>31</td><td>17</td><td>14</td><td>0</td><td>11</td><td>21</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html" target="_blank">Generic object recognition with boosting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>47%</td><td>293</td><td>138</td><td>155</td><td>16</td><td>195</td><td>97</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html" target="_blank">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html" target="_blank">HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</a></td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/a8d0b149c2eadaa02204d3e4356fbc8eccf3b315.html" target="_blank">Hi4D-ADSIP 3-D dynamic facial articulation database</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>1</td><td>4</td><td>11</td></tr><tr><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/2d45cfd838016a6e39f6b766ffe85acd649440c7.html" target="_blank">Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>1</td><td>5</td><td>3</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html" target="_blank">High Five: Recognising human interactions in TV shows</a></td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>98</td><td>49</td><td>49</td><td>10</td><td>66</td><td>28</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html" target="_blank">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>95</td><td>60</td><td>35</td><td>4</td><td>59</td><td>35</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html" target="_blank">Histograms of oriented gradients for human detection</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>INRIA Rhone-Alps, Montbonnot, France</td><td>France</td><td>45.21788600</td><td>5.80736900</td><td>51%</td><td>999</td><td>510</td><td>489</td><td>44</td><td>419</td><td>509</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html" target="_blank">How to Take a Good Selfie?</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>11</td><td>8</td><td>3</td><td>0</td><td>7</td><td>5</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html" target="_blank">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>0</td><td>14</td><td>4</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html" target="_blank">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>280</td><td>167</td><td>113</td><td>12</td><td>139</td><td>137</td></tr><tr><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td><td>pa_100k</td><td>PA-100K</td><td><a href="papers/f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44.html" target="_blank">HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>55</td><td>30</td><td>25</td><td>0</td><td>36</td><td>17</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html" target="_blank">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><span class="gray">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>14</td><td>5</td><td>9</td><td>2</td><td>12</td><td>1</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html" target="_blank">IARPA Janus Benchmark-B Face Dataset</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>35</td><td>12</td><td>23</td><td>6</td><td>25</td><td>8</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html" target="_blank">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>35</td><td>14</td><td>21</td><td>2</td><td>21</td><td>14</td></tr><tr><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/7f23a4bb0c777dd72cca7665a5f370ac7980217e.html" target="_blank">Improving Person Re-identification by Attribute and Identity Learning</a></td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>87</td><td>42</td><td>45</td><td>0</td><td>43</td><td>42</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html" target="_blank">Indian Face Age Database: A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html" target="_blank">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td>edu</td><td>BVBCET, Hubli, India</td><td>India</td><td>15.36883320</td><td>75.12137960</td><td>59%</td><td>17</td><td>10</td><td>7</td><td>0</td><td>11</td><td>5</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html" target="_blank">Interactive Facial Feature Localization</a></td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>57%</td><td>352</td><td>200</td><td>152</td><td>26</td><td>212</td><td>146</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html" target="_blank">Investigating Open-World Person Re-identification Using a Drone</a></td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>1</td><td>5</td><td>2</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html" target="_blank">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>United States</td><td>34.22498270</td><td>-77.86907744</td><td>69%</td><td>13</td><td>9</td><td>4</td><td>0</td><td>6</td><td>8</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html" target="_blank">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html" 
target="_blank">Iranian Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</a></td><td>2007 International Conference on Machine Vision</td><td>edu</td><td>Islamic Azad University</td><td>Iran</td><td>34.84529990</td><td>48.55962120</td><td>30%</td><td>23</td><td>7</td><td>16</td><td>2</td><td>14</td><td>9</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html" target="_blank">Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>1</td><td>3</td><td>5</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html" target="_blank">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td>edu</td><td>University of North Carolina at Chapel Hill</td><td>United States</td><td>35.91139710</td><td>-79.05045290</td><td>56%</td><td>82</td><td>46</td><td>36</td><td>6</td><td>28</td><td>52</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html" target="_blank">Kinship Verification through Transfer Learning</a></td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>71</td><td>40</td><td>31</td><td>2</td><td>29</td><td>42</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html" target="_blank">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>123</td><td>71</td><td>52</td><td>3</td><td>71</td><td>51</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html" target="_blank">Labeled Faces in the Wild: A Database forStudying Face Recognition in Unconstrained Environments</a></td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>547</td><td>452</td><td>64</td><td>598</td><td>382</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html" target="_blank">Labeled Faces in the Wild: A Survey</a></td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>109</td><td>47</td><td>62</td><td>9</td><td>66</td><td>43</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a 
href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html" target="_blank">Large age-gap face verification by feature injection in deep networks</a></td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>4</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html" target="_blank">Large scale unconstrained open set face database</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>83%</td><td>6</td><td>5</td><td>1</td><td>0</td><td>4</td><td>2</td></tr><tr><td>4af89578ac237278be310f7660a408b03f12d603</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/4af89578ac237278be310f7660a408b03f12d603.html" target="_blank">Large-scale geo-facial image analysis</a></td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>83%</td><td>6</td><td>5</td><td>1</td><td>0</td><td>4</td><td>2</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>108</td><td>56</td><td>52</td><td>11</td><td>66</td><td>44</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>108</td><td>56</td><td>52</td><td>11</td><td>66</td><td>44</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html" target="_blank">Learning Face Representation from Scratch</a></td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>476</td><td>277</td><td>199</td><td>26</td><td>290</td><td>182</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html" target="_blank">Learning Social Relation Traits from Face Images</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>23</td><td>10</td><td>13</td><td>4</td><td>16</td><td>7</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html" target="_blank">Learning effective human pose estimation from inaccurate 
annotation</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Leeds</td><td>United Kingdom</td><td>53.80387185</td><td>-1.55245712</td><td>61%</td><td>169</td><td>103</td><td>66</td><td>8</td><td>108</td><td>65</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>erce</td><td>ERCe</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td><td>mot</td><td>MOT</td><td><a href="papers/5981e6479c3fd4e31644db35d236bfb84ae46514.html" target="_blank">Learning to associate: HybridBoosted multi-target tracker for crowded scene</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of Southern California</td><td>United States</td><td>34.02241490</td><td>-118.28634407</td><td>53%</td><td>326</td><td>172</td><td>153</td><td>27</td><td>190</td><td>137</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>369</td><td>207</td><td>161</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>369</td><td>207</td><td>161</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>369</td><td>207</td><td>161</td><td>32</td><td>237</td><td>131</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html" target="_blank">Level Playing Field for Million Scale Face Recognition</a></td><td><span 
class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>39</td><td>19</td><td>20</td><td>2</td><td>29</td><td>9</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html" target="_blank">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>197</td><td>112</td><td>85</td><td>16</td><td>108</td><td>88</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfpw</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html" target="_blank">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html" target="_blank">Locally Aligned Feature Transforms across Views</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>258</td><td>134</td><td>124</td><td>17</td><td>136</td><td>117</td></tr><tr><td>8990cdce3f917dad622e43e033db686b354d057c</td><td>tiny_faces</td><td>TinyFace</td><td><a href="papers/8990cdce3f917dad622e43e033db686b354d057c.html" target="_blank">Low-Resolution Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html" target="_blank">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>168</td><td>84</td><td>84</td><td>5</td><td>97</td><td>69</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>437</td><td>225</td><td>211</td><td>24</td><td>228</td><td>203</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition 
(FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>437</td><td>225</td><td>211</td><td>24</td><td>228</td><td>203</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html" target="_blank">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>180</td><td>88</td><td>92</td><td>16</td><td>120</td><td>59</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html" target="_blank">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>67</td><td>28</td><td>39</td><td>4</td><td>29</td><td>28</td></tr><tr><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td><td>gfw</td><td>Grouping Face in the Wild</td><td><a href="papers/e58dd160a76349d46f881bd6ddbc2921f08d1050.html" target="_blank">Merge or Not? Learning to Group Faces via Imitation Learning</a></td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html" target="_blank">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>81%</td><td>16</td><td>13</td><td>3</td><td>0</td><td>13</td><td>4</td></tr><tr><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td><td>coco</td><td>COCO</td><td><a href="papers/5e0f8c355a37a5a89351c02f174e7a5ddcb98683.html" target="_blank">Microsoft COCO: Common Objects in Context</a></td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>545</td><td>454</td><td>26</td><td>722</td><td>259</td></tr><tr><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/41976ebc8ab76d9a6861487c97cc7fcbe3b6015f.html" target="_blank">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>29</td><td>19</td><td>10</td><td>2</td><td>27</td><td>2</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern 
Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>55%</td><td>311</td><td>172</td><td>139</td><td>35</td><td>208</td><td>105</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>55%</td><td>311</td><td>172</td><td>139</td><td>35</td><td>208</td><td>105</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html" target="_blank">Multi-camera activity correlation analysis</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>62%</td><td>142</td><td>88</td><td>54</td><td>7</td><td>77</td><td>64</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html" target="_blank">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>148</td><td>95</td><td>53</td><td>5</td><td>80</td><td>65</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html" target="_blank">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>26</td><td>15</td><td>11</td><td>2</td><td>21</td><td>6</td></tr><tr><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2b926b3586399d028b46315d7d9fb9d879e4f79c.html" target="_blank">Multimodal 2D, 2.5D & 3D Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2006 International Conference on Image Processing</td><td>edu</td><td>Universidad Rey Juan Carlos, 
Spain</td><td>Spain</td><td>40.33586610</td><td>-3.87694320</td><td>50%</td><td>14</td><td>7</td><td>7</td><td>0</td><td>2</td><td>12</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html" target="_blank">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>42</td><td>20</td><td>22</td><td>0</td><td>17</td><td>26</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html" target="_blank">Names and faces in the news</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html" target="_blank">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>22</td><td>10</td><td>12</td><td>3</td><td>11</td><td>10</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html" target="_blank">Object Detection Combining Recognition and Segmentation</a></td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>105</td><td>53</td><td>52</td><td>9</td><td>58</td><td>43</td></tr><tr><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/12ad3b5bbbf407f8e54ea692c07633d1a867c566.html" target="_blank">Object recognition using segmentation for feature detection</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 17th International Conference on Pattern Recognition, 2004. ICPR 2004.</td><td>edu</td><td>Inst. of Comput. Sci., Univ. 
of Leoben, Austria</td><td>Austria</td><td>47.38473720</td><td>15.09302010</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html" target="_blank">On a Large Sequence-Based Human Gait Database</a></td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>150</td><td>90</td><td>60</td><td>17</td><td>103</td><td>51</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html" target="_blank">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>78</td><td>35</td><td>43</td><td>8</td><td>44</td><td>31</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html" target="_blank">Overview of the face recognition grand challenge</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>NIST</td><td>United States</td><td>39.14004000</td><td>-77.21850600</td><td>52%</td><td>999</td><td>519</td><td>479</td><td>86</td><td>549</td><td>442</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html" target="_blank">PETS 2017: Dataset and Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>9</td><td>1</td><td>8</td><td>0</td><td>1</td><td>8</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html" target="_blank">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><span class="gray">[pdf]</a></td><td>Face and Gesture 2011</td><td>edu</td><td>Carnegie Mellon University Silicon Valley</td><td>United 
States</td><td>37.41021930</td><td>-122.05965487</td><td>51%</td><td>189</td><td>97</td><td>92</td><td>22</td><td>108</td><td>78</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html" target="_blank">Parameterisation of a stochastic model for human face identification</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>999</td><td>452</td><td>547</td><td>94</td><td>543</td><td>427</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html" target="_blank">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>138</td><td>72</td><td>66</td><td>6</td><td>76</td><td>63</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>28</td><td>19</td><td>9</td><td>0</td><td>13</td><td>15</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>28</td><td>19</td><td>9</td><td>0</td><td>13</td><td>15</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html" target="_blank">Pedestrian Attribute Recognition At Far Distance</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>88</td><td>55</td><td>33</td><td>1</td><td>50</td><td>36</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html" target="_blank">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>51%</td><td>999</td><td>510</td><td>489</td><td>71</td><td>526</td><td>466</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html" target="_blank">Pedestrian detection: A benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a 
href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>545</td><td>287</td><td>257</td><td>40</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>545</td><td>287</td><td>257</td><td>40</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>545</td><td>287</td><td>257</td><td>40</td><td>330</td><td>218</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>53%</td><td>169</td><td>89</td><td>80</td><td>6</td><td>113</td><td>54</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>53%</td><td>169</td><td>89</td><td>80</td><td>6</td><td>113</td><td>54</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html" target="_blank">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>386</td><td>220</td><td>166</td><td>25</td><td>204</td><td>180</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>209</td><td>121</td><td>88</td><td>9</td><td>111</td><td>97</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a 
href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>209</td><td>121</td><td>88</td><td>9</td><td>111</td><td>97</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html" target="_blank">Person Re-identification in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>77</td><td>35</td><td>42</td><td>2</td><td>47</td><td>27</td></tr><tr><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/a0cc5f73a37723a6dd465924143f1cb4976d0169.html" target="_blank">Person Transfer GAN to Bridge Domain Gap for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>24</td><td>17</td><td>7</td><td>1</td><td>20</td><td>4</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html" target="_blank">Personalizing Human Video Pose Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>36</td><td>20</td><td>16</td><td>2</td><td>30</td><td>8</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html" target="_blank">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>716</td><td>370</td><td>346</td><td>60</td><td>492</td><td>222</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html" target="_blank">Presentation and validation of the Radboud Faces Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>487</td><td>192</td><td>295</td><td>39</td><td>342</td><td>144</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html" target="_blank">Pruning training sets for learning of object categories</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>63</td><td>40</td><td>23</td><td>5</td><td>42</td><td>20</td></tr><tr><td>3531332efe19be21e7401ba1f04570a142617236</td><td>ufdd</td><td>UFDD</td><td><a href="papers/3531332efe19be21e7401ba1f04570a142617236.html" target="_blank">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline 
Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html" target="_blank">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>237</td><td>123</td><td>114</td><td>22</td><td>159</td><td>76</td></tr><tr><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td><td>megaage</td><td>MegaAge</td><td><a href="papers/c72a2ea819df9b0e8cd267eebcc6528b8741e03d.html" target="_blank">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html" target="_blank">Re-identification of pedestrians with variable occlusion and scale</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>Kingston University</td><td>United Kingdom</td><td>51.42930860</td><td>-0.26840440</td><td>11%</td><td>9</td><td>1</td><td>8</td><td>2</td><td>5</td><td>4</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html" target="_blank">Re-identify people in wide area camera network</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Udine</td><td>Italy</td><td>46.08107230</td><td>13.21194740</td><td>38%</td><td>60</td><td>23</td><td>37</td><td>1</td><td>38</td><td>21</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html" target="_blank">ReSEED: social event dEtection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>1</td><td>5</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html" target="_blank">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html" target="_blank">Recognition using visual phrases</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United 
States</td><td>40.11116745</td><td>-88.22587665</td><td>55%</td><td>246</td><td>135</td><td>111</td><td>18</td><td>170</td><td>68</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html" target="_blank">Recognize complex events from static images by fusing deep channels</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>44</td><td>29</td><td>15</td><td>1</td><td>29</td><td>15</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html" target="_blank">Recognizing disguised faces</a></td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>29</td><td>9</td><td>20</td><td>0</td><td>18</td><td>10</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html" target="_blank">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>511</td><td>247</td><td>264</td><td>51</td><td>329</td><td>182</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html" target="_blank">Robust Face Landmark Estimation under Occlusion</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>57%</td><td>325</td><td>185</td><td>140</td><td>19</td><td>194</td><td>133</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html" target="_blank">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><span class="gray">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>Canada</td><td>45.50397610</td><td>-73.57496870</td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>13</td><td>7</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html" target="_blank">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>34</td><td>16</td><td>18</td><td>3</td><td>21</td><td>12</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html" target="_blank">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and 
Cybernetics</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>19</td><td>9</td><td>10</td><td>2</td><td>6</td><td>13</td></tr><tr><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td><td>scface</td><td>SCface</td><td><a href="papers/29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d.html" target="_blank">SCface – surveillance cameras face database</a></td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td>Multimedia Tools and Applications</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>179</td><td>94</td><td>85</td><td>15</td><td>88</td><td>89</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html" target="_blank">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html" target="_blank">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Brown University</td><td>United States</td><td>41.82686820</td><td>-71.40123146</td><td>58%</td><td>264</td><td>154</td><td>110</td><td>27</td><td>206</td><td>56</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html" target="_blank">Scalable Person Re-identification: A Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>460</td><td>254</td><td>206</td><td>16</td><td>263</td><td>185</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>52%</td><td>52</td><td>27</td><td>25</td><td>3</td><td>38</td><td>13</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>52%</td><td>52</td><td>27</td><td>25</td><td>3</td><td>38</td><td>13</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html" target="_blank">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and 
Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>52</td><td>32</td><td>20</td><td>1</td><td>46</td><td>6</td></tr><tr><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/570f37ed63142312e6ccdf00ecc376341ec72b9f.html" target="_blank">Social LSTM: Human Trajectory Prediction in Crowded Spaces</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>224</td><td>110</td><td>114</td><td>3</td><td>140</td><td>81</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html" target="_blank">Spoofing faces using makeup: An investigative study</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>France</td><td>43.61581310</td><td>7.06838000</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>0</td><td>1</td><td>5</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html" target="_blank">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>86%</td><td>7</td><td>6</td><td>1</td><td>1</td><td>5</td><td>2</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>town_centre</td><td>TownCentre</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html" target="_blank">Stable multi-target tracking in real-time surveillance video</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>328</td><td>153</td><td>175</td><td>24</td><td>186</td><td>140</td></tr><tr><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/2306b2a8fba28539306052764a77a0d0f5d1236a.html" target="_blank">Surveillance Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html" target="_blank">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html" target="_blank">Texas 3D Face Recognition Database</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation 
(SSIAI)</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>66</td><td>29</td><td>37</td><td>3</td><td>40</td><td>27</td></tr><tr><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/6d96f946aaabc734af7fe3fc4454cf8547fcd5ed.html" target="_blank">The AR face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>519</td><td>480</td><td>56</td><td>455</td><td>530</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html" target="_blank">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>429</td><td>218</td><td>211</td><td>39</td><td>198</td><td>234</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html" target="_blank">The CMU Face In Action (FIA) Database</a></td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>54</td><td>21</td><td>33</td><td>5</td><td>40</td><td>16</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>760</td><td>409</td><td>350</td><td>50</td><td>404</td><td>345</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>760</td><td>409</td><td>350</td><td>50</td><td>404</td><td>345</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html" target="_blank">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>99</td><td>52</td><td>47</td><td>1</td><td>73</td><td>21</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>#N/A</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html" target="_blank">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" 
target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>37</td><td>17</td><td>20</td><td>3</td><td>30</td><td>7</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html" target="_blank">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>38%</td><td>21</td><td>8</td><td>13</td><td>2</td><td>18</td><td>3</td></tr><tr><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td><td>imdb_face</td><td>IMDb Face</td><td><a href="papers/9e31e77f9543ab42474ba4e9330676e18c242e72.html" target="_blank">The Devil of Face Recognition is in the Noise</a></td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>SenseTime</td><td>China</td><td>39.99300800</td><td>116.32988200</td><td>17%</td><td>6</td><td>1</td><td>5</td><td>0</td><td>4</td><td>1</td></tr><tr><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td><td>umd_faces</td><td>UMD</td><td><a href="papers/71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6.html" target="_blank">The Do’s and Don’ts for CNN-Based Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision Workshops (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>3</td><td>16</td><td>8</td></tr><tr><td>72a155c987816ae81c858fddbd6beab656d86220</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/72a155c987816ae81c858fddbd6beab656d86220.html" target="_blank">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html" target="_blank">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td>edu</td><td>University of Pittsburgh</td><td>United States</td><td>40.44415295</td><td>-79.96243993</td><td>54%</td><td>999</td><td>543</td><td>456</td><td>58</td><td>470</td><td>518</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html" target="_blank">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>29</td><td>8</td><td>21</td><td>3</td><td>18</td><td>9</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html" target="_blank">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a 
href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>115</td><td>49</td><td>66</td><td>8</td><td>75</td><td>37</td></tr><tr><td>dc8b25e35a3acb812beb499844734081722319b4</td><td>feret</td><td>FERET</td><td><a href="papers/dc8b25e35a3acb812beb499844734081722319b4.html" target="_blank">The FERET database and evaluation procedure for face-recognition algorithms</a></td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>999</td><td>455</td><td>544</td><td>103</td><td>590</td><td>421</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html" target="_blank">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>16</td><td>4</td><td>12</td><td>2</td><td>10</td><td>6</td></tr><tr><td>8be57cdad86fdf8c8290df4ca3149592f3c46dd3</td><td>m2vts</td><td>m2vts</td><td><a href="papers/8be57cdad86fdf8c8290df4ca3149592f3c46dd3.html" target="_blank">The M2VTS Multimodal Face Database (Release 1.00)</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>73</td><td>32</td><td>41</td><td>2</td><td>39</td><td>33</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>33</td><td>15</td><td>18</td><td>4</td><td>29</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>33</td><td>15</td><td>18</td><td>4</td><td>29</td><td>4</td></tr><tr><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td><td>mr2</td><td>MR2</td><td><a href="papers/578d4ad74818086bb64f182f72e2c8bd31e3d426.html" target="_blank">The MR2: A multi-racial, mega-resolution database of facial stimuli.</a></td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>14%</td><td>7</td><td>1</td><td>6</td><td>0</td><td>7</td><td>0</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html" target="_blank">The MUG facial expression 
database</a></td><td><span class="gray">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>Greece</td><td>40.62984145</td><td>22.95889350</td><td>41%</td><td>82</td><td>34</td><td>48</td><td>5</td><td>34</td><td>47</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html" target="_blank">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>61</td><td>28</td><td>33</td><td>0</td><td>43</td><td>16</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html" target="_blank">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>139</td><td>74</td><td>65</td><td>9</td><td>100</td><td>37</td></tr><tr><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td><td>voc</td><td>VOC</td><td><a href="papers/0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a.html" target="_blank">The Pascal Visual Object Classes (VOC) Challenge</a></td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>999</td><td>544</td><td>454</td><td>30</td><td>557</td><td>422</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html" target="_blank">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>116</td><td>69</td><td>47</td><td>14</td><td>84</td><td>31</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html" target="_blank">The intrinsic memorability of face photographs.</a></td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td>Journal of experimental psychology. 
General</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>52</td><td>27</td><td>25</td><td>2</td><td>36</td><td>14</td></tr><tr><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/d178cde92ab3dc0dd2ebee5a76a33d556c39448b.html" target="_blank">The jiku mobile video dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>National University of Singapore</td><td>Singapore</td><td>1.29620180</td><td>103.77689944</td><td>71%</td><td>24</td><td>17</td><td>7</td><td>0</td><td>6</td><td>19</td></tr><tr><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td><td>put_face</td><td>Put Face</td><td><a href="papers/ae0aee03d946efffdc7af2362a42d3750e7dd48a.html" target="_blank">The put face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>99</td><td>43</td><td>56</td><td>7</td><td>54</td><td>48</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html" target="_blank">Three-dimensional face recognition: an eigensurface approach</a></td><td><span class="gray">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. ICIP '04.</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>38</td><td>13</td><td>25</td><td>4</td><td>24</td><td>13</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html" target="_blank">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>51%</td><td>84</td><td>43</td><td>41</td><td>6</td><td>51</td><td>33</td></tr><tr><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/64e0690dd176a93de9d4328f6e31fc4afe1e7536.html" target="_blank">Tracking Multiple People Online and in Real Time</a></td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>23</td><td>9</td><td>14</td><td>2</td><td>12</td><td>10</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html" target="_blank">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>34</td><td>24</td><td>10</td><td>0</td><td>18</td><td>16</td></tr><tr><td>4eab317b5ac436a949849ed286baa3de2a541eef</td><td>laofiw</td><td>LAOFIW</td><td><a href="papers/4eab317b5ac436a949849ed286baa3de2a541eef.html" target="_blank">Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</a></td><td><a href="https://arxiv.org/pdf/1809.02169.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html" target="_blank">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>607</td><td>392</td><td>56</td><td>628</td><td>362</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html" target="_blank">UMB-DB: A database of partially occluded 3D faces</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>47</td><td>29</td><td>18</td><td>2</td><td>22</td><td>24</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html" target="_blank">UMDFaces: An annotated face dataset for training deep networks</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>42</td><td>21</td><td>21</td><td>5</td><td>30</td><td>11</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html" target="_blank">USED: a large-scale social event detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Trento</td><td>Italy</td><td>46.06588360</td><td>11.11598940</td><td>71%</td><td>7</td><td>5</td><td>2</td><td>0</td><td>3</td><td>4</td></tr><tr><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td><td>uccs</td><td>UCCS</td><td><a href="papers/d4f1eb008eb80595bcfdac368e23ae9754e1e745.html" target="_blank">Unconstrained Face Detection and Open-Set Face Recognition Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>4</td><td>1</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>ufi</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html" target="_blank">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>12</td><td>4</td><td>8</td><td>0</td><td>4</td><td>6</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html" target="_blank">Understanding Kin Relationships in a Photo</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>94</td><td>55</td><td>39</td><td>1</td><td>33</td><td>61</td></tr><tr><td>5a4df9bef1872865f0b619ac3aacc97f49e4a035</td><td>cuhk_train_station</td><td>CUHK Train Station Dataset</td><td><a 
href="papers/5a4df9bef1872865f0b619ac3aacc97f49e4a035.html" target="_blank">Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>51%</td><td>141</td><td>72</td><td>69</td><td>6</td><td>60</td><td>75</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html" target="_blank">Understanding images of groups of people</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University Silicon Valley</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/15e1af79939dbf90790b03d8aa02477783fb1d0f.html" target="_blank">Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</a></td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html" target="_blank">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td>edu</td><td>University of Notre Dame</td><td>United States</td><td>41.70456775</td><td>-86.23822026</td><td>60%</td><td>35</td><td>21</td><td>14</td><td>3</td><td>18</td><td>15</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html" target="_blank">VADANA: A dense dataset for facial image analysis</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>University of Delaware</td><td>United States</td><td>39.68103280</td><td>-75.75401840</td><td>47%</td><td>15</td><td>7</td><td>8</td><td>0</td><td>5</td><td>10</td></tr><tr><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/70c59dc3470ae867016f6ab0e008ac8ba03774a1.html" target="_blank">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>83</td><td>41</td><td>42</td><td>6</td><td>61</td><td>20</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html" target="_blank">VQA: Visual Question Answering</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>erce</td><td>ERCe</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>29</td><td>16</td><td>13</td><td>2</td><td>14</td><td>13</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>29</td><td>16</td><td>13</td><td>2</td><td>14</td><td>13</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html" target="_blank">Violent flows: Real-time detection of violent crowd behavior</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>55%</td><td>88</td><td>48</td><td>40</td><td>6</td><td>45</td><td>44</td></tr><tr><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td><td>kitti</td><td>KITTI</td><td><a href="papers/026e3363b7f76b51cc711886597a44d5f1fd1de2.html" target="_blank">Vision meets robotics: The KITTI dataset</a></td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td>I. J. 
Robotics Res.</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>547</td><td>452</td><td>37</td><td>553</td><td>462</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html" target="_blank">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>168</td><td>83</td><td>85</td><td>11</td><td>85</td><td>79</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html" target="_blank">Visual Kinship Recognition of Families in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>University of Massachusetts Dartmouth</td><td>United States</td><td>41.62772475</td><td>-71.00724501</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>2</td><td>3</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html" target="_blank">WIDER FACE: A Face Detection Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>58%</td><td>178</td><td>104</td><td>74</td><td>13</td><td>112</td><td>66</td></tr><tr><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/36bccfb2ad847096bc76777e544f305813cd8f5b.html" target="_blank">WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td>WLFDB</td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html" target="_blank">WLFDB : Weakly Labeled Face Databases</a></td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html" target="_blank">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>78</td><td>45</td><td>33</td><td>6</td><td>54</td><td>23</td></tr><tr><td>0c91808994a250d7be332400a534a9291ca3b60e</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/0c91808994a250d7be332400a534a9291ca3b60e.html" target="_blank">Weak Hypotheses and Boosting for Generic Object Detection and Recognition</a></td><td><a 
href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>236</td><td>127</td><td>109</td><td>17</td><td>161</td><td>77</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html" target="_blank">Web-based database for facial expression analysis</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>464</td><td>217</td><td>247</td><td>45</td><td>282</td><td>188</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html" target="_blank">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Kentucky</td><td>United States</td><td>38.03337420</td><td>-84.50177580</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/b62628ac06bbac998a3ab825324a41a11bc3a988.html" target="_blank">XM2VTSDB : The extended M2VTS database</a></td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>864</td><td>463</td><td>401</td><td>40</td><td>493</td><td>404</td></tr><tr><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/010f0f4929e6a6644fb01f0e43820f91d0fad292.html" target="_blank">YFCC100M: the new data in multimedia research</a></td><td><span class="gray">[pdf]</a></td><td>Commun. ACM</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>274</td><td>154</td><td>120</td><td>24</td><td>172</td><td>100</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html" target="_blank">YawDD: a yawning detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>15</td><td>8</td><td>7</td><td>1</td><td>2</td><td>13</td></tr></table></body></html>
\ No newline at end of file
