| field | value | date |
|---|---|---|
| author | jules@lens <julescarbon@gmail.com> | 2019-02-25 16:19:26 +0100 |
| committer | jules@lens <julescarbon@gmail.com> | 2019-02-25 16:19:26 +0100 |
| commit | cd624bdcc5307713dca541f1be130450e86d62ea (patch) | |
| tree | 4dce4b0eacaa1bf3232ec5f53cdb60cc4f5001eb /scraper/reports/report_coverage.html | |
| parent | 2d3963d9a6f39786dd07717f05392ae74e6bb685 (diff) | |
update scrape
Diffstat (limited to 'scraper/reports/report_coverage.html')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | scraper/reports/report_coverage.html | 2 |
1 file changed, 1 insertion, 1 deletion
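The report being updated is a single minified HTML line, which is why the whole file appears as one changed line in the diffstat above. For orientation, here is a minimal sketch of how the coverage columns could be read back out of the report. This is not the repository's own code: it assumes Python 3 with beautifulsoup4 installed, and the `load_coverage` helper and `COLUMNS` names are invented here for illustration; the column order follows the header row visible in the diff below.

```python
# Sketch: read the coverage table out of scraper/reports/report_coverage.html.
# Assumes beautifulsoup4 is installed; column names are illustrative only and
# mirror the <th> header row in the report (Paper ID ... With DOI).
from bs4 import BeautifulSoup

COLUMNS = [
    "paper_id", "megapixels_key", "megapixels_name", "report_link", "pdf_link",
    "journal", "type", "address", "country", "lat", "lng", "coverage",
    "total_citations", "geocoded_citations", "unknown_citations",
    "empty_citations", "with_pdf", "with_doi",
]

def load_coverage(path="scraper/reports/report_coverage.html"):
    with open(path, encoding="utf-8") as f:
        soup = BeautifulSoup(f.read(), "html.parser")
    rows = []
    for tr in soup.find_all("tr"):
        cells = [td.get_text(strip=True) for td in tr.find_all("td")]
        if len(cells) == len(COLUMNS):
            rows.append(dict(zip(COLUMNS, cells)))
    return rows

if __name__ == "__main__":
    for row in load_coverage():
        # e.g. "UCF101: 61% coverage, 611/999 citations geocoded"
        print(f'{row["megapixels_name"]}: {row["coverage"]} coverage, '
              f'{row["geocoded_citations"]}/{row["total_citations"]} citations geocoded')
```

In the rows below, the Coverage column corresponds to Geocoded Citations divided by Total Citations, rounded to a whole percent (for example, 611/999 ≈ 61% for UCF101).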
diff --git a/scraper/reports/report_coverage.html b/scraper/reports/report_coverage.html
index 4e5847be..f384cf95 100644
--- a/scraper/reports/report_coverage.html
+++ b/scraper/reports/report_coverage.html
@@ -1 +1 @@
-<!doctype html><html><head><meta charset='utf-8'><title>Coverage</title><link rel='stylesheet' href='reports.css'></head><body><h2>Coverage</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Country</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html" target="_blank">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>611</td><td>388</td><td>73</td><td>716</td><td>283</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html" target="_blank">Face detection, pose estimation, and landmark localization in the wild</a></td><td><a href="http://crcv.ucf.edu/courses/CAP6412/Spring2013/papers/zhu-ramanan-face-cvpr12.pdf" target="_blank">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>608</td><td>391</td><td>59</td><td>622</td><td>387</td></tr><tr><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td><td>voc</td><td>VOC</td><td><a href="papers/0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a.html" target="_blank">The Pascal Visual Object Classes (VOC) Challenge</a></td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>Oxford University</td><td>United Kingdom</td><td>51.75208490</td><td>-1.25166460</td><td>58%</td><td>999</td><td>575</td><td>424</td><td>35</td><td>611</td><td>414</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html" target="_blank">Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>999</td><td>575</td><td>422</td><td>71</td><td>639</td><td>371</td></tr><tr><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td><td>coco</td><td>COCO</td><td><a href="papers/5e0f8c355a37a5a89351c02f174e7a5ddcb98683.html" target="_blank">Microsoft COCO: Common Objects in Context</a></td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>999</td><td>569</td><td>430</td><td>29</td><td>799</td><td>193</td></tr><tr><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td><td>a_pascal_yahoo</td><td>aPascal</td><td><a href="papers/2e384f057211426ac5922f1b33d2aa8df5d51f57.html" target="_blank">Describing objects by their 
attributes</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0468.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United States</td><td>40.11116745</td><td>-88.22587665</td><td>57%</td><td>999</td><td>565</td><td>433</td><td>74</td><td>738</td><td>264</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html" target="_blank">Attribute and simile classifiers for face verification</a></td><td><a href="http://acberg.com/papers/kbbn09iccv.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>894</td><td>544</td><td>350</td><td>56</td><td>604</td><td>300</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html" target="_blank">Deep Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>999</td><td>543</td><td>456</td><td>70</td><td>635</td><td>370</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html" target="_blank">Histograms of oriented gradients for human detection</a></td><td><a href="http://courses.cs.washington.edu/courses/cse576/12sp/notes/CVPR2005_HOG.pdf" target="_blank">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>INRIA Rhone-Alps, Montbonnot, France</td><td>France</td><td>45.21788600</td><td>5.80736900</td><td>54%</td><td>999</td><td>539</td><td>460</td><td>67</td><td>537</td><td>477</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html" target="_blank">80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</a></td><td><a href="http://cvcl.mit.edu/SUNSeminar/Torralba_80M_PAMI08.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>999</td><td>535</td><td>463</td><td>94</td><td>685</td><td>327</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html" target="_blank">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf" target="_blank">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td>edu</td><td>University of Pittsburgh</td><td>United States</td><td>40.44415295</td><td>-79.96243993</td><td>55%</td><td>975</td><td>535</td><td>439</td><td>67</td><td>475</td><td>510</td></tr><tr><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td><td>kitti</td><td>KITTI</td><td><a href="papers/026e3363b7f76b51cc711886597a44d5f1fd1de2.html" target="_blank">Vision meets robotics: The KITTI 
dataset</a></td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td>I. J. Robotics Res.</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>999</td><td>532</td><td>467</td><td>37</td><td>570</td><td>448</td></tr><tr><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/6d96f946aaabc734af7fe3fc4454cf8547fcd5ed.html" target="_blank">The AR face database</a></td><td><span class="gray">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>999</td><td>526</td><td>473</td><td>51</td><td>459</td><td>573</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html" target="_blank">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="https://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>999</td><td>514</td><td>485</td><td>77</td><td>551</td><td>459</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html" target="_blank">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>999</td><td>503</td><td>496</td><td>75</td><td>572</td><td>439</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html" target="_blank">Overview of the face recognition grand challenge</a></td><td><a href="http://ivizlab.sfu.ca/arya/Papers/IEEE/Proceedings/C%20V%20P%20R-%2005/Face%20Recognition%20Grand%20Challenge.pdf" target="_blank">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>NIST</td><td>United States</td><td>39.14004000</td><td>-77.21850600</td><td>50%</td><td>999</td><td>497</td><td>501</td><td>114</td><td>594</td><td>424</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html" target="_blank">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><a href="http://vision.caltech.edu/Image_Datasets/CaltechPedestrians/files/PAMI12pedestrians.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>49%</td><td>999</td><td>485</td><td>514</td><td>71</td><td>541</td><td>464</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html" target="_blank">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine 
Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>999</td><td>484</td><td>515</td><td>110</td><td>525</td><td>485</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html" target="_blank">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>999</td><td>482</td><td>517</td><td>103</td><td>560</td><td>454</td></tr><tr><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/b62628ac06bbac998a3ab825324a41a11bc3a988.html" target="_blank">Xm2vtsdb: the Extended M2vts Database</a></td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>906</td><td>481</td><td>425</td><td>44</td><td>542</td><td>408</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html" target="_blank">VQA: Visual Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.00468.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>731</td><td>444</td><td>287</td><td>47</td><td>629</td><td>96</td></tr><tr><td>dc8b25e35a3acb812beb499844734081722319b4</td><td>feret</td><td>FERET</td><td><a href="papers/dc8b25e35a3acb812beb499844734081722319b4.html" target="_blank">The FERET database and evaluation procedure for face-recognition algorithms</a></td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>999</td><td>443</td><td>556</td><td>106</td><td>606</td><td>413</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html" target="_blank">Parameterisation of a stochastic model for human face identification</a></td><td><a href="https://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>999</td><td>442</td><td>557</td><td>97</td><td>569</td><td>445</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html" target="_blank">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="https://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>848</td><td>422</td><td>426</td><td>55</td><td>420</td><td>433</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html" target="_blank">Deep Learning Face Attributes in the Wild</a></td><td><a href="https://arxiv.org/pdf/1411.7766.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision 
(ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>52%</td><td>808</td><td>421</td><td>386</td><td>68</td><td>670</td><td>118</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html" target="_blank">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>771</td><td>403</td><td>368</td><td>54</td><td>624</td><td>138</td></tr><tr><td>177bc509dd0c7b8d388bb47403f28d6228c14b5c</td><td>celeba_plus</td><td>CelebFaces+</td><td><a href="papers/177bc509dd0c7b8d388bb47403f28d6228c14b5c.html" target="_blank">Deep Learning Face Representation from Predicting 10,000 Classes</a></td><td><a href="http://mmlab.ie.cuhk.edu.hk/pdf/YiSun_CVPR14.pdf" target="_blank">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>660</td><td>397</td><td>263</td><td>25</td><td>340</td><td>330</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database of Human Faces</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>742</td><td>396</td><td>344</td><td>59</td><td>416</td><td>329</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database of Human Faces</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>742</td><td>396</td><td>344</td><td>59</td><td>416</td><td>329</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html" target="_blank">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><a href="http://http.cs.berkeley.edu/Research/Projects/CS/vision/human/poselets_iccv09.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>707</td><td>368</td><td>339</td><td>67</td><td>509</td><td>215</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html" target="_blank">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>584</td><td>329</td><td>255</td><td>38</td><td>338</td><td>245</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfw_p</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html" target="_blank">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><a 
href="http://neerajkumar.org/projects/face-parts/base/papers/nk_cvpr2011_faceparts.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>521</td><td>315</td><td>206</td><td>42</td><td>337</td><td>195</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk03</td><td>CUHK03</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html" target="_blank">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>512</td><td>304</td><td>208</td><td>29</td><td>324</td><td>180</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html" target="_blank">Face recognition in unconstrained videos with matched background similarity</a></td><td><a href="http://www.cs.tau.ac.il/thesis/thesis/Maoz.Itay-MSc.Thesis.pdf" target="_blank">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Tel Aviv University</td><td>Israel</td><td>32.11198890</td><td>34.80459702</td><td>60%</td><td>485</td><td>292</td><td>192</td><td>30</td><td>298</td><td>193</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html" target="_blank">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="https://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>586</td><td>290</td><td>294</td><td>48</td><td>345</td><td>244</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>529</td><td>280</td><td>248</td><td>40</td><td>324</td><td>213</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>529</td><td>280</td><td>248</td><td>40</td><td>324</td><td>213</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>529</td><td>280</td><td>248</td><td>40</td><td>324</td><td>213</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html" target="_blank">A 3D facial expression database for facial behavior research</a></td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf" target="_blank">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>555</td><td>263</td><td>291</td><td>47</td><td>299</td><td>270</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html" target="_blank">Pedestrian detection: A benchmark</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1378.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>519</td><td>261</td><td>258</td><td>27</td><td>289</td><td>233</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html" target="_blank">Learning Face Representation from Scratch</a></td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese Academy of 
Sciences</td><td>China</td><td>40.00447950</td><td>116.37023800</td><td>60%</td><td>436</td><td>260</td><td>176</td><td>30</td><td>288</td><td>150</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html" target="_blank">Scalable Person Re-identification: A Benchmark</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Zheng_Scalable_Person_Re-Identification_ICCV_2015_paper.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>394</td><td>238</td><td>156</td><td>18</td><td>272</td><td>116</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html" target="_blank">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="http://facedetection.homepage.t-online.de/downloads/AVBPA01BioID.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>498</td><td>237</td><td>261</td><td>56</td><td>330</td><td>179</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/eccv_2014_deepfacealign.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>383</td><td>231</td><td>152</td><td>25</td><td>265</td><td>121</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/eccv_2014_deepfacealign.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>383</td><td>231</td><td>152</td><td>25</td><td>265</td><td>121</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>424</td><td>225</td><td>198</td><td>26</td><td>239</td><td>190</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>424</td><td>225</td><td>198</td><td>26</td><td>239</td><td>190</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html" target="_blank">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><a 
href="http://ei.is.tuebingen.mpg.de/uploads_file/attachment/attachment/168/andriluka14benchmark.pdf" target="_blank">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>356</td><td>221</td><td>135</td><td>21</td><td>304</td><td>53</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="https://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>373</td><td>215</td><td>157</td><td>35</td><td>251</td><td>129</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="https://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>373</td><td>215</td><td>157</td><td>35</td><td>251</td><td>129</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="https://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>373</td><td>215</td><td>157</td><td>35</td><td>251</td><td>129</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html" target="_blank">Web-based database for facial expression analysis</a></td><td><a href="http://dev.pubs.doc.ic.ac.uk/Pantic-ICME05-2/Pantic-ICME05-2.pdf" target="_blank">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>440</td><td>212</td><td>228</td><td>44</td><td>267</td><td>181</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html" target="_blank">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="http://www.jdl.ac.cn/peal/files/ieee_smc_a_gao_cas-peal.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>415</td><td>209</td><td>206</td><td>39</td><td>189</td><td>232</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html" target="_blank">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>352</td><td>204</td><td>148</td><td>27</td><td>196</td><td>157</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html" target="_blank">Interactive Facial Feature Localization</a></td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>59%</td><td>339</td><td>201</td><td>138</td><td>29</td><td>219</td><td>129</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html" target="_blank">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>324</td><td>199</td><td>125</td><td>29</td><td>211</td><td>118</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html" target="_blank">Robust Face Landmark Estimation under Occlusion</a></td><td><a href="http://authors.library.caltech.edu/45988/1/ICCV13%20Burgos-Artizzu.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>61%</td><td>305</td><td>186</td><td>119</td><td>16</td><td>192</td><td>116</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="https://pdfs.semanticscholar.org/c327/15b5106f46eb6761531704cd2a9b5571832e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>278</td><td>180</td><td>98</td><td>13</td><td>208</td><td>78</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="https://pdfs.semanticscholar.org/c327/15b5106f46eb6761531704cd2a9b5571832e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>278</td><td>180</td><td>98</td><td>13</td><td>208</td><td>78</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html" target="_blank">Depth and Appearance for Mobile Scene Analysis</a></td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07-poster.pdf" target="_blank">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td>edu</td><td>ETH 
Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>55%</td><td>319</td><td>176</td><td>143</td><td>27</td><td>195</td><td>127</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html" target="_blank">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><a href="http://gravis.cs.unibas.ch/publications/2009/BFModel09.pdf" target="_blank">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>323</td><td>176</td><td>147</td><td>29</td><td>226</td><td>98</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html" target="_blank">Presentation and validation of the Radboud Faces Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>446</td><td>175</td><td>271</td><td>43</td><td>322</td><td>136</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html" target="_blank">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://face.cs.kit.edu/befit/workshop2011/pdf/slides/martin_koestinger-slides.pdf" target="_blank">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>292</td><td>175</td><td>117</td><td>37</td><td>212</td><td>84</td></tr><tr><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td><td>mot</td><td>MOT</td><td><a href="papers/5981e6479c3fd4e31644db35d236bfb84ae46514.html" target="_blank">Learning to associate: HybridBoosted multi-target tracker for crowded scene</a></td><td><a href="http://iris.usc.edu/Outlines/papers/2009/yuan-chang-nevatia-cvpr09.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of Southern California</td><td>United States</td><td>34.02241490</td><td>-118.28634407</td><td>52%</td><td>330</td><td>172</td><td>157</td><td>27</td><td>196</td><td>139</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://videolectures.net/site/normal_dl/tag=81522/cvpr2010_andriluka_m3de_01.pdf" target="_blank">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>54%</td><td>302</td><td>164</td><td>138</td><td>34</td><td>207</td><td>100</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://videolectures.net/site/normal_dl/tag=81522/cvpr2010_andriluka_m3de_01.pdf" target="_blank">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision 
and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>54%</td><td>302</td><td>164</td><td>138</td><td>34</td><td>207</td><td>100</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk01</td><td>CUHK01</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html" target="_blank">Human Reidentification with Transferred Metric Learning</a></td><td><a href="https://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>258</td><td>160</td><td>98</td><td>12</td><td>142</td><td>115</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html" target="_blank">Bosphorus Database for 3D Face Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>328</td><td>158</td><td>170</td><td>19</td><td>149</td><td>183</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html" target="_blank">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><a href="http://static.cs.brown.edu/~gen/pub_papers/SUN_Attribute_Database_CVPR2012.pdf" target="_blank">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Brown University</td><td>United States</td><td>41.82686820</td><td>-71.40123146</td><td>58%</td><td>269</td><td>156</td><td>113</td><td>29</td><td>215</td><td>57</td></tr><tr><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/010f0f4929e6a6644fb01f0e43820f91d0fad292.html" target="_blank">YFCC100M: the new data in multimedia research</a></td><td><a href="https://arxiv.org/pdf/1503.01817.pdf" target="_blank">[pdf]</a></td><td>Commun. ACM</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>56%</td><td>276</td><td>155</td><td>121</td><td>23</td><td>175</td><td>99</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces_news</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html" target="_blank">Names and faces in the news</a></td><td><a href="http://ttic.uchicago.edu/~mmaire/papers/pdf/names_faces_cvpr2004.pdf" target="_blank">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>294</td><td>150</td><td>143</td><td>29</td><td>215</td><td>82</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>towncenter</td><td>TownCenter</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html" target="_blank">Stable multi-target tracking in real-time surveillance video</a></td><td><a href="http://ben.benfold.com/docs/benfold_reid_cvpr2011-preprint.pdf" target="_blank">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>310</td><td>137</td><td>173</td><td>24</td><td>180</td><td>131</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html" target="_blank">Generic object recognition with boosting</a></td><td><a href="http://www.cse.unr.edu/~bebis/CS773C/ObjectRecognition/Papers/Opelt06.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>48%</td><td>286</td><td>136</td><td>150</td><td>16</td><td>193</td><td>97</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html" target="_blank">Recognition using visual phrases</a></td><td><a href="http://vision.cs.uiuc.edu/phrasal/recognition_using_visual_phrases.pdf" target="_blank">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United States</td><td>40.11116745</td><td>-88.22587665</td><td>58%</td><td>233</td><td>135</td><td>98</td><td>18</td><td>177</td><td>58</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk02</td><td>CUHK02</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html" target="_blank">Locally Aligned Feature Transforms across Views</a></td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>242</td><td>129</td><td>113</td><td>17</td><td>139</td><td>102</td></tr><tr><td>0c91808994a250d7be332400a534a9291ca3b60e</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/0c91808994a250d7be332400a534a9291ca3b60e.html" target="_blank">Weak Hypotheses and Boosting for Generic Object Detection and Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>247</td><td>125</td><td>122</td><td>18</td><td>177</td><td>78</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html" target="_blank">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>218</td><td>125</td><td>92</td><td>17</td><td>152</td><td>71</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>berkeley_pose</td><td>BPAD</td><td><a 
href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html" target="_blank">Describing people: A poselet-based approach to attribute classification</a></td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf" target="_blank">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>221</td><td>125</td><td>96</td><td>14</td><td>165</td><td>59</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_c</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html" target="_blank">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><a href="http://biometrics.cse.msu.edu/Publications/Face/Klareetal_UnconstrainedFaceDetectionRecognitionJanus_CVPR15.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>222</td><td>123</td><td>99</td><td>19</td><td>161</td><td>62</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1454.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>217</td><td>121</td><td>96</td><td>14</td><td>133</td><td>86</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1454.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>217</td><td>121</td><td>96</td><td>14</td><td>133</td><td>86</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>ilids_vid_reid</td><td>iLIDS-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>210</td><td>120</td><td>90</td><td>10</td><td>115</td><td>94</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>210</td><td>120</td><td>90</td><td>10</td><td>115</td><td>94</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html" target="_blank">Exploring Models and Data for Image Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>191</td><td>115</td><td>76</td><td>12</td><td>165</td><td>27</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html" target="_blank">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>198</td><td>114</td><td>84</td><td>16</td><td>111</td><td>88</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html" target="_blank">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>185</td><td>111</td><td>74</td><td>15</td><td>124</td><td>64</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html" target="_blank">Learning effective human pose estimation from inaccurate annotation</a></td><td><a href="http://www.comp.leeds.ac.uk/mat4saj/publications/johnson11cvpr.pdf" target="_blank">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Leeds</td><td>United Kingdom</td><td>53.80387185</td><td>-1.55245712</td><td>64%</td><td>173</td><td>111</td><td>62</td><td>10</td><td>122</td><td>56</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html" target="_blank">Understanding images of groups of people</a></td><td><a href="http://chenlab.ece.cornell.edu/people/Andy/Andy_files/cvpr09.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>54%</td><td>202</td><td>110</td><td>92</td><td>12</td><td>132</td><td>75</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html" target="_blank">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><a href="http://users.cecs.anu.edu.au/~adhall/Dhall_Goecke_Lucey_Gedeon_M_2012.pdf" target="_blank">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>Australia</td><td>-35.27769990</td><td>149.11852700</td><td>60%</td><td>182</td><td>109</td><td>73</td><td>8</td><td>86</td><td>99</td></tr><tr><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/570f37ed63142312e6ccdf00ecc376341ec72b9f.html" target="_blank">Social LSTM: Human Trajectory Prediction in Crowded Spaces</a></td><td><a href="http://cs.stanford.edu/groups/vision/pdf/CVPR16_N_LSTM.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition 
(CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>229</td><td>106</td><td>123</td><td>5</td><td>150</td><td>79</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html" target="_blank">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><a href="http://michaelryoo.com/papers/cvpr2013_ryoo.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>148</td><td>103</td><td>45</td><td>8</td><td>111</td><td>38</td></tr><tr><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/22ad2c8c0f4d6aa4328b38d894b814ec22579761.html" target="_blank">Clothing cosegmentation for recognizing people</a></td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/2670CVPR08Gallagher.pdf" target="_blank">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>58%</td><td>177</td><td>103</td><td>74</td><td>7</td><td>101</td><td>84</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw_a</td><td>LFW-a</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html" target="_blank">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/jpatchlbp.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>177</td><td>98</td><td>79</td><td>15</td><td>104</td><td>75</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html" target="_blank">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~lz013/papers/deepfashion_poster.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>150</td><td>97</td><td>53</td><td>4</td><td>111</td><td>38</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html" target="_blank">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><a href="http://www.iainm.com/iainm/Publications_files/2011_PAINFUL.pdf" target="_blank">[pdf]</a></td><td>Face and Gesture 2011</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>52%</td><td>184</td><td>95</td><td>89</td><td>23</td><td>112</td><td>71</td></tr><tr><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td><td>disfa</td><td>DISFA</td><td><a href="papers/5a5f0287484f0d480fed1ce585dbf729586f0edc.html" target="_blank">DISFA: A Spontaneous Facial Action Intensity Database</a></td><td><a href="http://mohammadmahoor.com/wp-content/uploads/2017/06/DiSFA_Paper_andAppendix_Final_OneColumn1-1.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Affective Computing</td><td>edu</td><td>University of Denver</td><td>United 
States</td><td>39.67665410</td><td>-104.96220300</td><td>49%</td><td>190</td><td>94</td><td>96</td><td>19</td><td>100</td><td>91</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html" target="_blank">Age and Gender Estimation of Unfiltered Faces</a></td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>55%</td><td>168</td><td>92</td><td>76</td><td>5</td><td>94</td><td>78</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html" target="_blank">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>167</td><td>91</td><td>76</td><td>14</td><td>131</td><td>36</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html" target="_blank">Appearance-based gaze estimation in the wild</a></td><td><a href="https://arxiv.org/pdf/1504.02863.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>138</td><td>90</td><td>48</td><td>3</td><td>97</td><td>42</td></tr><tr><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td><td>scface</td><td>SCface</td><td><a href="papers/29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d.html" target="_blank">SCface – surveillance cameras face database</a></td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td>Multimedia Tools and Applications</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>178</td><td>90</td><td>88</td><td>15</td><td>90</td><td>89</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html" target="_blank">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><a href="http://crcv-web.eecs.ucf.edu/papers/cvpr2013/Counting_V3o.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>125</td><td>88</td><td>37</td><td>6</td><td>73</td><td>52</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html" target="_blank">Multi-camera activity correlation analysis</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0163.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>62%</td><td>138</td><td>86</td><td>52</td><td>8</td><td>79</td><td>61</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html" target="_blank">On a Large 
Sequence-Based Human Gait Database</a></td><td><a href="https://eprints.soton.ac.uk/257901/1/Shutler_2002.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>148</td><td>86</td><td>62</td><td>17</td><td>104</td><td>49</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html" target="_blank">WIDER FACE: A Face Detection Benchmark</a></td><td><a href="https://arxiv.org/pdf/1511.06523.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>57%</td><td>148</td><td>85</td><td>63</td><td>15</td><td>108</td><td>41</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html" target="_blank">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="http://liangzheng.org/1320.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>146</td><td>85</td><td>61</td><td>6</td><td>97</td><td>49</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>136</td><td>79</td><td>57</td><td>7</td><td>108</td><td>27</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>136</td><td>79</td><td>57</td><td>7</td><td>108</td><td>27</td></tr><tr><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td><td>fei</td><td>FEI</td><td><a href="papers/8b56e33f33e582f3e473dba573a16b598ed9bcdc.html" target="_blank">A new ranking method for principal components analysis and its application to face image analysis</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>169</td><td>78</td><td>91</td><td>6</td><td>72</td><td>101</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html" target="_blank">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><a href="http://www.csee.usf.edu/~scanavan/papers/FG2013.pdf" target="_blank">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>United States</td><td>42.08779975</td><td>-75.97066066</td><td>51%</td><td>151</td><td>77</td><td>74</td><td>7</td><td>87</td><td>65</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html" target="_blank">Vision-Based 
Analysis of Small Groups in Pedestrian Crowds</a></td><td><a href="http://vc.cs.nthu.edu.tw/home/paper/codfiles/htchiang/201212250411/newp12.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>151</td><td>74</td><td>77</td><td>9</td><td>79</td><td>73</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html" target="_blank">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>53%</td><td>133</td><td>71</td><td>62</td><td>13</td><td>94</td><td>41</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html" target="_blank">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Massachusetts</td><td>United States</td><td>42.38897850</td><td>-72.52869870</td><td>58%</td><td>123</td><td>71</td><td>52</td><td>3</td><td>72</td><td>50</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html" target="_blank">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><a href="https://arxiv.org/pdf/1512.00596.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>121</td><td>71</td><td>50</td><td>9</td><td>98</td><td>22</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html" target="_blank">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>112</td><td>70</td><td>42</td><td>14</td><td>84</td><td>29</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html" target="_blank">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><a href="https://arxiv.org/pdf/1304.0869.pdf" target="_blank">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>128</td><td>68</td><td>60</td><td>6</td><td>73</td><td>60</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html" target="_blank">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Rothe_DEX_Deep_EXpectation_ICCV_2015_paper.pdf" 
target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>120</td><td>67</td><td>53</td><td>5</td><td>74</td><td>47</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html" target="_blank">A data-driven approach to cleaning large face datasets</a></td><td><a href="http://stefan.winkler.net/Publications/icip2014a.pdf" target="_blank">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>123</td><td>66</td><td>57</td><td>4</td><td>96</td><td>27</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html" target="_blank">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>114</td><td>61</td><td>53</td><td>10</td><td>71</td><td>43</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html" target="_blank">Hipster wars: Discovering elements of fashion styles</a></td><td><a href="http://acberg.com/papers/hipster_eccv14.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>91</td><td>60</td><td>31</td><td>5</td><td>61</td><td>29</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>110</td><td>60</td><td>50</td><td>12</td><td>69</td><td>43</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>110</td><td>60</td><td>50</td><td>12</td><td>69</td><td>43</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html" target="_blank">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>122</td><td>59</td><td>63</td><td>11</td><td>71</td><td>51</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html" target="_blank">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on 
Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>96</td><td>55</td><td>41</td><td>2</td><td>34</td><td>63</td></tr><tr><td>9a9877791945c6fa4c1743ec6d3fb32570ef8481</td><td>m2vts</td><td>m2vts</td><td><a href="papers/9a9877791945c6fa4c1743ec6d3fb32570ef8481.html" target="_blank">The M2VTS Multimodal Face Database (Release 1.00)</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Laboratoire de Télécommunications et Télédétection, UCL, Louvain-La-Neuve, Belgium</td><td>Belgium</td><td>50.66968750</td><td>4.61559090</td><td>43%</td><td>129</td><td>55</td><td>74</td><td>4</td><td>80</td><td>54</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html" target="_blank">Pedestrian Attribute Recognition At Far Distance</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>80</td><td>54</td><td>26</td><td>2</td><td>51</td><td>28</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html" target="_blank">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><a href="http://openaccess.thecvf.com/content_iccv_2015/papers/Liu_A_Spatio-Temporal_Appearance_ICCV_2015_paper.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>85</td><td>53</td><td>32</td><td>9</td><td>51</td><td>34</td></tr><tr><td>06f02199690961ba52997cde1527e714d2b3bf8f</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/06f02199690961ba52997cde1527e714d2b3bf8f.html" target="_blank">Gaze locking: passive eye contact detection for human-object interaction</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>64%</td><td>80</td><td>51</td><td>29</td><td>0</td><td>49</td><td>35</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html" target="_blank">We are family: joint pose estimation of multiple persons</a></td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>77</td><td>51</td><td>26</td><td>5</td><td>60</td><td>19</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html" target="_blank">Object Detection Combining Recognition and Segmentation</a></td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>101</td><td>50</td><td>51</td><td>11</td><td>58</td><td>42</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html" target="_blank">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>112</td><td>49</td><td>63</td><td>11</td><td>79</td><td>35</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html" target="_blank">High Five: Recognising human interactions in TV shows</a></td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>91</td><td>47</td><td>44</td><td>11</td><td>64</td><td>27</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html" target="_blank">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><a href="http://www.eurecom.fr/fr/publication/4393/download/mm-publi-4393.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td>edu</td><td>University of North Carolina at Chapel Hill</td><td>United States</td><td>35.91139710</td><td>-79.05045290</td><td>61%</td><td>75</td><td>46</td><td>29</td><td>6</td><td>26</td><td>50</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html" target="_blank">Violent flows: Real-time detection of violent crowd behavior</a></td><td><a href="http://www.openu.ac.il/home/hassner/data/violentflows/violent_flows.pdf" target="_blank">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>55%</td><td>83</td><td>46</td><td>37</td><td>6</td><td>44</td><td>41</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html" target="_blank">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>90</td><td>45</td><td>45</td><td>5</td><td>60</td><td>31</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html" target="_blank">Labeled Faces in the Wild : A Survey</a></td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Stevens Institute of Technology</td><td>United States</td><td>40.74225200</td><td>-74.02709490</td><td>45%</td><td>99</td><td>45</td><td>54</td><td>8</td><td>63</td><td>36</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html" target="_blank">Automatic 3D face authentication</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>Image Vision 
Comput.</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>95</td><td>44</td><td>51</td><td>8</td><td>61</td><td>35</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html" target="_blank">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><a href="https://arxiv.org/pdf/1502.00739.pdf" target="_blank">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>60</td><td>42</td><td>18</td><td>0</td><td>38</td><td>24</td></tr><tr><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td><td>put_face</td><td>Put Face</td><td><a href="papers/ae0aee03d946efffdc7af2362a42d3750e7dd48a.html" target="_blank">The put face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>100</td><td>42</td><td>58</td><td>7</td><td>56</td><td>48</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html" target="_blank">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>49%</td><td>83</td><td>41</td><td>42</td><td>6</td><td>51</td><td>33</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html" target="_blank">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>83</td><td>40</td><td>43</td><td>2</td><td>63</td><td>19</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html" target="_blank">Kinship verification through transfer learning</a></td><td><a href="http://ijcai.org/Proceedings/11/Papers/422.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>71</td><td>39</td><td>32</td><td>2</td><td>29</td><td>43</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html" target="_blank">Pruning training sets for learning of object categories</a></td><td><a href="http://authors.library.caltech.edu/11469/1/ANGcvpr05.pdf" target="_blank">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>60</td><td>39</td><td>21</td><td>5</td><td>43</td><td>17</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html" target="_blank">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><a 
href="http://cbcsl.ece.ohio-state.edu/cvpr16.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>72</td><td>39</td><td>33</td><td>7</td><td>54</td><td>17</td></tr><tr><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td><td>feret</td><td>FERET</td><td><a href="papers/31de9b3dd6106ce6eec9a35991b2b9083395fd0b.html" target="_blank">FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</a></td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>82</td><td>38</td><td>44</td><td>5</td><td>62</td><td>20</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html" target="_blank">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><a href="https://arxiv.org/pdf/1501.05703.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>72%</td><td>50</td><td>36</td><td>13</td><td>2</td><td>40</td><td>9</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html" target="_blank">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2016/papers/Niu_Ordinal_Regression_With_CVPR_2016_paper.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>68</td><td>36</td><td>32</td><td>8</td><td>49</td><td>17</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html" target="_blank">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><a href="http://affect.media.mit.edu/pdfs/13.McDuff-etal-AMFED.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>73</td><td>34</td><td>39</td><td>6</td><td>41</td><td>34</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><a href="http://www.cse.msu.edu/~rossarun/pubs/ChenMakeupDetection_ICB2013.pdf" target="_blank">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>74%</td><td>46</td><td>34</td><td>12</td><td>1</td><td>18</td><td>28</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><a href="http://www.cse.msu.edu/~rossarun/pubs/ChenMakeupDetection_ICB2013.pdf" 
target="_blank">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>74%</td><td>46</td><td>34</td><td>12</td><td>1</td><td>18</td><td>28</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html" target="_blank">Person Re-identification in the Wild</a></td><td><a href="https://arxiv.org/pdf/1604.02531.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>65</td><td>33</td><td>32</td><td>1</td><td>46</td><td>17</td></tr><tr><td>5ffd74d2873b7cba2cbc5fd295cc7fbdedca22a2</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/5ffd74d2873b7cba2cbc5fd295cc7fbdedca22a2.html" target="_blank">The Cityscapes Dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>54</td><td>32</td><td>22</td><td>3</td><td>40</td><td>14</td></tr><tr><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td><td>nudedetection</td><td>Nude Detection</td><td><a href="papers/7ace44190729927e5cb0dd5d363fcae966fe13f7.html" target="_blank">A bag-of-features approach based on Hue-SIFT descriptor for nude detection</a></td><td><a href="http://www.eurasip.org/Proceedings/Eusipco/Eusipco2009/contents/papers/1569191772.pdf" target="_blank">[pdf]</a></td><td>2009 17th European Signal Processing Conference</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>51</td><td>31</td><td>20</td><td>1</td><td>18</td><td>33</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html" target="_blank">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf" target="_blank">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>52</td><td>30</td><td>22</td><td>5</td><td>37</td><td>15</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>61%</td><td>49</td><td>30</td><td>19</td><td>0</td><td>18</td><td>31</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United 
States</td><td>39.65404635</td><td>-79.96475355</td><td>61%</td><td>49</td><td>30</td><td>19</td><td>0</td><td>18</td><td>31</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html" target="_blank">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><a href="http://allenai.org/content/publications/SituationRecognition.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>48</td><td>30</td><td>18</td><td>2</td><td>46</td><td>2</td></tr><tr><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/18858cc936947fc96b5c06bbe3c6c2faa5614540.html" target="_blank">Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</a></td><td><a href="https://pdfs.semanticscholar.org/03c1/fc9c3339813ed81ad0de540132f9f695a0f8.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>55</td><td>29</td><td>26</td><td>0</td><td>47</td><td>7</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html" target="_blank">Recognize complex events from static images by fusing deep channels</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2015/supplemental/Xiong_Recognize_Complex_Events_2015_CVPR_supplemental.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>45</td><td>29</td><td>16</td><td>1</td><td>30</td><td>15</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="http://klab.tch.harvard.edu/academia/classes/Neuro230/2012/lectures/Lecture_11_Reading.pdf" target="_blank">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>58%</td><td>50</td><td>29</td><td>21</td><td>3</td><td>39</td><td>11</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="http://klab.tch.harvard.edu/academia/classes/Neuro230/2012/lectures/Lecture_11_Reading.pdf" target="_blank">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>58%</td><td>50</td><td>29</td><td>21</td><td>3</td><td>39</td><td>11</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html" target="_blank">The MUG facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of 
Thessaloniki</td><td>Greece</td><td>40.62984145</td><td>22.95889350</td><td>43%</td><td>68</td><td>29</td><td>39</td><td>5</td><td>28</td><td>40</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html" target="_blank">UMB-DB: A database of partially occluded 3D faces</a></td><td><a href="http://face.cs.kit.edu/befit/workshop2011/pdf/slides/claudio_cusano-slides.pdf" target="_blank">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>45</td><td>27</td><td>18</td><td>2</td><td>20</td><td>24</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html" target="_blank">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf" target="_blank">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>58</td><td>26</td><td>32</td><td>7</td><td>41</td><td>18</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html" target="_blank">Automated Human Identification Using Ear Imaging</a></td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>37%</td><td>70</td><td>26</td><td>44</td><td>6</td><td>28</td><td>42</td></tr><tr><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td><td>pornodb</td><td>Pornography DB</td><td><a href="papers/b92a1ed9622b8268ae3ac9090e25789fc41cc9b8.html" target="_blank">Pooling in image representation: The visual codeword point of view</a></td><td><a href="http://cedric.cnam.fr/~thomen/papers/avila_CVIU2012_final.pdf" target="_blank">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>32%</td><td>77</td><td>25</td><td>52</td><td>7</td><td>46</td><td>34</td></tr><tr><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/eb027969f9310e0ae941e2adee2d42cdf07d938c.html" target="_blank">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td>edu</td><td>Oxford University</td><td>United Kingdom</td><td>51.75208490</td><td>-1.25166460</td><td>45%</td><td>56</td><td>25</td><td>31</td><td>6</td><td>50</td><td>6</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html" target="_blank">The CMU Face In Action (FIA) Database</a></td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>55</td><td>24</td><td>31</td><td>5</td><td>41</td><td>17</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a 
href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html" target="_blank">Texas 3D Face Recognition Database</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ssiai_may10.pdf" target="_blank">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>61</td><td>24</td><td>37</td><td>3</td><td>37</td><td>25</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html" target="_blank">The intrinsic memorability of face photographs.</a></td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td>Journal of experimental psychology. General</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>47</td><td>24</td><td>23</td><td>3</td><td>34</td><td>13</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html" target="_blank">Consistent Re-identification in a Camera Network</a></td><td><a href="http://cs-people.bu.edu/dasabir/papers/ECCV14_Poster.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>45</td><td>23</td><td>22</td><td>7</td><td>34</td><td>11</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html" target="_blank">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><a href="http://openaccess.thecvf.com/content_ICCV_2017/papers/Neuhold_The_Mapillary_Vistas_ICCV_2017_paper.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>44</td><td>23</td><td>21</td><td>0</td><td>36</td><td>7</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html" target="_blank">End-to-End Deep Learning for Person Search</a></td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>41</td><td>22</td><td>19</td><td>2</td><td>27</td><td>12</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html" target="_blank">Personalizing Human Video Pose Estimation</a></td><td><a href="https://arxiv.org/pdf/1511.06676.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Oxford University</td><td>United Kingdom</td><td>51.75208490</td><td>-1.25166460</td><td>66%</td><td>32</td><td>21</td><td>11</td><td>2</td><td>29</td><td>5</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html" target="_blank">Re-identify people in wide area camera network</a></td><td><a href="http://users.dimi.uniud.it/~niki.martinel/data/publications/2012/CVPR/MarMicCVPR2012.pdf" target="_blank">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern 
Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>38%</td><td>55</td><td>21</td><td>34</td><td>2</td><td>35</td><td>19</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html" target="_blank">UMDFaces: An annotated face dataset for training deep networks</a></td><td><a href="https://arxiv.org/pdf/1611.01484.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td>edu</td><td>University of Maryland</td><td>United States</td><td>39.28996850</td><td>-76.62196103</td><td>57%</td><td>35</td><td>20</td><td>15</td><td>4</td><td>28</td><td>7</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html" target="_blank">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>40</td><td>20</td><td>20</td><td>0</td><td>21</td><td>20</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/758d7e1be64cc668c59ef33ba8882c8597406e53.html" target="_blank">AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</a></td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>38</td><td>20</td><td>18</td><td>1</td><td>26</td><td>11</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html" target="_blank">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>25</td><td>19</td><td>6</td><td>1</td><td>19</td><td>7</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html" target="_blank">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>39</td><td>19</td><td>20</td><td>2</td><td>27</td><td>12</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html" target="_blank">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><a href="https://arxiv.org/pdf/1608.01041.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>29</td><td>18</td><td>11</td><td>0</td><td>15</td><td>14</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: 
Database and Evaluation</a></td><td><a href="http://www.cbsr.ia.ac.cn/english/APiS_1.0_paper.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>26</td><td>18</td><td>8</td><td>1</td><td>13</td><td>13</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cbsr.ia.ac.cn/english/APiS_1.0_paper.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>26</td><td>18</td><td>8</td><td>1</td><td>13</td><td>13</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html" target="_blank">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td>edu</td><td>University of Notre Dame</td><td>United States</td><td>41.70456775</td><td>-86.23822026</td><td>56%</td><td>32</td><td>18</td><td>14</td><td>3</td><td>17</td><td>15</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html" target="_blank">Fashion Landmark Detection in the Wild</a></td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>1</td><td>17</td><td>9</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html" target="_blank">Adaptive Image Sampling and Windows Classification for On – board Pedestrian Detection</a></td><td><a href="https://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>44</td><td>17</td><td>27</td><td>1</td><td>21</td><td>23</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html" target="_blank">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><a href="http://openaccess.thecvf.com/content_iccv_2015/papers/Chu_Multi-Task_Recurrent_Neural_ICCV_2015_paper.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>25</td><td>16</td><td>9</td><td>2</td><td>21</td><td>5</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>CAFE</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html" target="_blank">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" 
target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>33</td><td>16</td><td>17</td><td>3</td><td>28</td><td>5</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html" target="_blank">Ear Recognition: More Than a Survey</a></td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" target="_blank">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>24</td><td>16</td><td>8</td><td>0</td><td>11</td><td>13</td></tr><tr><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/a5a44a32a91474f00a3cda671a802e87c899fbb4.html" target="_blank">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>25</td><td>16</td><td>9</td><td>2</td><td>25</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><a href="https://arxiv.org/pdf/1511.07917.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>27</td><td>15</td><td>12</td><td>1</td><td>23</td><td>5</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><a href="https://arxiv.org/pdf/1511.07917.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>27</td><td>15</td><td>12</td><td>1</td><td>23</td><td>5</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html" target="_blank">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1705.00393.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>United States</td><td>47.65432380</td><td>-122.30800894</td><td>39%</td><td>38</td><td>15</td><td>23</td><td>2</td><td>29</td><td>8</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html" target="_blank">Describing Common Human Visual Actions in Images</a></td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>26</td><td>15</td><td>11</td><td>0</td><td>25</td><td>1</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html" target="_blank">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>21</td><td>14</td><td>7</td><td>0</td><td>18</td><td>3</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html" target="_blank">Genealogical face recognition based on UB KinFace database</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>United States</td><td>42.93362780</td><td>-78.88394479</td><td>47%</td><td>30</td><td>14</td><td>16</td><td>1</td><td>10</td><td>21</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html" target="_blank">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>18</td><td>14</td><td>4</td><td>0</td><td>16</td><td>2</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html" target="_blank">Merging Pose Estimates Across Space and Time</a></td><td><a href="https://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>87%</td><td>15</td><td>13</td><td>2</td><td>0</td><td>12</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>4</td></tr><tr><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td><td>buhmap_db</td><td>BUHMAP-DB</td><td><a href="papers/014b8df0180f33b9fea98f34ae611c6447d761d2.html" target="_blank">Facial feature tracking and expression recognition for sign language</a></td><td><a href="https://www.cmpe.boun.edu.tr/~ari/files/ari2008iscis.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE 17th Signal Processing and Communications Applications Conference</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>25</td><td>12</td><td>13</td><td>1</td><td>11</td><td>15</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td>i-LIDS</td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html" target="_blank">Imagery 
Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td></td><td>38%</td><td>32</td><td>12</td><td>20</td><td>2</td><td>18</td><td>15</td></tr><tr><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/12ad3b5bbbf407f8e54ea692c07633d1a867c566.html" target="_blank">Object recognition using segmentation for feature detection</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 17th International Conference on Pattern Recognition, 2004. ICPR 2004.</td><td>edu</td><td>Inst. of Comput. Sci., Univ. of Leoben, Austria</td><td>Austria</td><td>47.38473720</td><td>15.09302010</td><td>41%</td><td>29</td><td>12</td><td>17</td><td>1</td><td>21</td><td>8</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html" target="_blank">Three-dimensional face recognition: an eigensurface approach</a></td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf" target="_blank">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. ICIP '04.</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>36</td><td>12</td><td>24</td><td>4</td><td>25</td><td>11</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html" target="_blank">Fine-grained evaluation on face detection in the wild</a></td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf" target="_blank">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>13</td><td>4</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html" target="_blank">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>41%</td><td>29</td><td>12</td><td>17</td><td>3</td><td>17</td><td>12</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html" target="_blank">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="https://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>17</td><td>11</td><td>6</td><td>1</td><td>12</td><td>5</td></tr><tr><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/a8d0b149c2eadaa02204d3e4356fbc8eccf3b315.html" target="_blank">Hi4D-ADSIP 3-D dynamic facial articulation database</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision 
Comput.</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>18</td><td>11</td><td>7</td><td>1</td><td>7</td><td>11</td></tr><tr><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td><td>agedb</td><td>AgeDB</td><td><a href="papers/6dcf418c778f528b5792104760f1fbfe90c6dd6a.html" target="_blank">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/agedb.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>91%</td><td>11</td><td>10</td><td>1</td><td>0</td><td>10</td><td>1</td></tr><tr><td>ec792ad2433b6579f2566c932ee414111e194537</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/ec792ad2433b6579f2566c932ee414111e194537.html" target="_blank">Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</a></td><td><a href="https://arxiv.org/pdf/1711.08565.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>14</td><td>10</td><td>4</td><td>1</td><td>11</td><td>3</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html" target="_blank">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>United States</td><td>34.22498270</td><td>-77.86907744</td><td>69%</td><td>13</td><td>9</td><td>4</td><td>0</td><td>6</td><td>8</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html" target="_blank">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>21</td><td>9</td><td>12</td><td>3</td><td>11</td><td>10</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html" target="_blank">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>20</td><td>9</td><td>11</td><td>2</td><td>17</td><td>4</td></tr><tr><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td><td>umd_faces</td><td>UMD</td><td><a href="papers/71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6.html" target="_blank">The Do’s and Don’ts for CNN-Based Face Verification</a></td><td><a href="https://arxiv.org/pdf/1705.07426.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision Workshops (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>25</td><td>9</td><td>16</td><td>3</td><td>17</td><td>6</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html" target="_blank">Indian Movie Face Database: 
A benchmark for face recognition under wide variations</a></td><td><a href="http://cdn.iiit.ac.in/cdn/cvit.iiit.ac.in/papers/Shankar2013Indian.pdf" target="_blank">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td>edu</td><td>CVIT, IIITH, India</td><td>India</td><td>17.44595810</td><td>78.34959940</td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>10</td><td>5</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html" target="_blank">Recognizing disguised faces</a></td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>24</td><td>8</td><td>16</td><td>0</td><td>18</td><td>6</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html" target="_blank">Learning Social Relation Traits from Face Images</a></td><td><a href="https://arxiv.org/pdf/1509.03936.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>20</td><td>8</td><td>12</td><td>5</td><td>15</td><td>5</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html" target="_blank">YawDD: a yawning detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>14</td><td>8</td><td>6</td><td>1</td><td>2</td><td>12</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html" target="_blank">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><span class="gray">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>Canada</td><td>45.50397610</td><td>-73.57496870</td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>13</td><td>7</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_c</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html" target="_blank">IARPA Janus Benchmark-B Face Dataset</a></td><td><a href="http://biometrics.cse.msu.edu/Publications/Face/Whitelametal_IARPAJanusBenchmark-BFaceDataset_CVPRW17.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Michigan State University</td><td>United States</td><td>42.71856800</td><td>-84.47791571</td><td>28%</td><td>25</td><td>7</td><td>18</td><td>6</td><td>21</td><td>4</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html" target="_blank">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer 
Vision</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>9</td><td>7</td><td>2</td><td>0</td><td>5</td><td>4</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html" target="_blank">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>15</td><td>7</td><td>8</td><td>1</td><td>10</td><td>4</td></tr><tr><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2b926b3586399d028b46315d7d9fb9d879e4f79c.html" target="_blank">Multimodal 2D, 2.5D & 3D Face Verification</a></td><td><a href="http://www.researchgate.net/profile/Enrique_Cabello/publication/224057733_Multimodal_2D_2.5D__3D_Face_Verification/links/0912f50f522298fa95000000.pdf" target="_blank">[pdf]</a></td><td>2006 International Conference on Image Processing</td><td>edu</td><td>Universidad Rey Juan Carlos, Spain</td><td></td><td>40.33586610</td><td>-3.87694320</td><td>50%</td><td>14</td><td>7</td><td>7</td><td>0</td><td>2</td><td>12</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html" target="_blank">VADANA: A dense dataset for facial image analysis</a></td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf" target="_blank">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>University of Delaware</td><td>United States</td><td>39.68103280</td><td>-75.75401840</td><td>44%</td><td>16</td><td>7</td><td>9</td><td>0</td><td>6</td><td>10</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html" target="_blank">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</a></td><td><a href="https://arxiv.org/pdf/1605.09653.pdf" target="_blank">[pdf]</a></td><td>IEEE transactions on pattern analysis and machine intelligence</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>15</td><td>7</td><td>8</td><td>1</td><td>10</td><td>5</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html" target="_blank">Iranian Face Database with age, pose and expression</a></td><td><a href="http://www.iranprc.org/pdf/paper/2007-02.pdf" target="_blank">[pdf]</a></td><td>2007 International Conference on Machine Vision</td><td>edu</td><td>Islamic Azad University</td><td>Iran</td><td>34.84529990</td><td>48.55962120</td><td>35%</td><td>20</td><td>7</td><td>13</td><td>2</td><td>12</td><td>9</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html" target="_blank">How to Take a Good Selfie?</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>9</td><td>6</td><td>3</td><td>0</td><td>6</td><td>4</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html" 
target="_blank">Competitive affective gaming: winning with a smile</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>Portugal</td><td>38.66096400</td><td>-9.20581300</td><td>75%</td><td>8</td><td>6</td><td>2</td><td>0</td><td>4</td><td>4</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html" target="_blank">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/workshops/w19/11%20-%20The%20HDA%20data%20set%20for%20research%20on%20fully.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>35%</td><td>17</td><td>6</td><td>11</td><td>2</td><td>11</td><td>6</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html" target="_blank">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><a href="https://arxiv.org/pdf/1511.02459.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>14</td><td>6</td><td>8</td><td>3</td><td>5</td><td>9</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html" target="_blank">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><a href="http://cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf" target="_blank">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>83%</td><td>6</td><td>5</td><td>1</td><td>1</td><td>5</td><td>1</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html" target="_blank">USED: a large-scale social event detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Trento</td><td>Italy</td><td>46.06588360</td><td>11.11598940</td><td>71%</td><td>7</td><td>5</td><td>2</td><td>0</td><td>3</td><td>4</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html" target="_blank">ChaLearn looking at people: A review of events and resources</a></td><td><a href="https://arxiv.org/pdf/1701.02664.pdf" target="_blank">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>10</td><td>5</td><td>5</td><td>1</td><td>6</td><td>4</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html" target="_blank">Component-Based Face Recognition with 3D Morphable Models</a></td><td><a href="http://cbcl.mit.edu/cbcl/publications/theses/thesis-huang.pdf" target="_blank">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition 
Workshop</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>12</td><td>4</td><td>8</td><td>0</td><td>8</td><td>4</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html" target="_blank">Large scale unconstrained open set face database</a></td><td><a href="http://vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of Colorado at Colorado Springs</td><td>United States</td><td>38.89646790</td><td>-104.80505940</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>3</td><td>2</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html" target="_blank">Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</a></td><td><a href="http://www.csis.pace.edu/~ctappert/dps/2013BTAS/Papers/Paper%20139/PID2859389.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>1</td><td>3</td><td>5</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>czech_news_agency</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html" target="_blank">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="https://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>10</td><td>4</td><td>6</td><td>0</td><td>4</td><td>6</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html" target="_blank">ReSEED: social event dEtection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>1</td><td>5</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html" target="_blank">A Multi-modal Graphical Model for Scene Analysis</a></td><td><a href="http://www.nicta.com.au/wp-content/uploads/2015/02/TaghaviNaminetalWACV15.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>0</td><td>5</td><td>3</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html" target="_blank">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="https://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>6</td><td>3</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a 
href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html" target="_blank">Faces in Places: compound query retrieval</a></td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>1</td></tr><tr><td>a5a3bc3e5e9753769163cb30b16dbd12e266b93e</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/a5a3bc3e5e9753769163cb30b16dbd12e266b93e.html" target="_blank">Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>1</td><td>5</td><td>3</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html" target="_blank">Large age-gap face verification by feature injection in deep networks</a></td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>4</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html" target="_blank">Spoofing faces using makeup: An investigative study</a></td><td><a href="http://www.cse.msu.edu/~rossarun/pubs/ChenFaceMakeupSpoof_ISBA2017.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>France</td><td>43.61581310</td><td>7.06838000</td><td>60%</td><td>5</td><td>3</td><td>2</td><td>0</td><td>1</td><td>4</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html" target="_blank">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><a href="http://biometrics.cse.msu.edu/Publications/Face/Mazeetal_IARPAJanusBenchmarkCFaceDatasetAndProtocol_ICB2018.pdf" target="_blank">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>9</td><td>3</td><td>6</td><td>2</td><td>9</td><td>0</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market1203</td><td>Market 1203</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html" target="_blank">Visual Kinship Recognition of Families in the Wild</a></td><td><a href="https://web.northeastern.edu/smilelab/fiw/papers/Supplemental_PP.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>University of Massachusetts Dartmouth</td><td>United States</td><td>41.62772475</td><td>-71.00724501</td><td>100%</td><td>3</td><td>3</td><td>0</td><td>0</td><td>2</td><td>1</td></tr><tr><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td><td>uccs</td><td>UCCS</td><td><a href="papers/d4f1eb008eb80595bcfdac368e23ae9754e1e745.html" target="_blank">Unconstrained Face Detection and Open-Set Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1708.02337.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>4</td><td>1</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html" target="_blank">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html" target="_blank">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><a href="https://arxiv.org/pdf/1703.06283.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>17%</td><td>12</td><td>2</td><td>10</td><td>1</td><td>11</td><td>1</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html" target="_blank">Investigating Open-World Person Re-identification Using a Drone</a></td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>3</td><td>2</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku</td><td>PKU</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html" target="_blank">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>3</td><td>2</td><td>1</td><td>0</td><td>1</td><td>2</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html" target="_blank">MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION 
AND RECOGNITION Ara V. Ne an and Monson H. Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</a></td><td><a href="https://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>2</td><td>1</td></tr><tr><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461.html" target="_blank">A 3 D Dynamic Database for Unconstrained Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html" target="_blank">Indian Face Age Database : A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td><td>mr2</td><td>MR2</td><td><a href="papers/578d4ad74818086bb64f182f72e2c8bd31e3d426.html" target="_blank">The MR2: A multi-racial, mega-resolution database of facial stimuli.</a></td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>14%</td><td>7</td><td>1</td><td>6</td><td>0</td><td>7</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td>WLFDB</td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html" target="_blank">WLFDB: Weakly Labeled Face Databases</a></td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html" target="_blank">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><a href="https://scalable.mpi-inf.mpg.de/files/2016/01/main_wacv.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html" target="_blank">Re-identification of pedestrians with variable occlusion and scale</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>Kingston University</td><td>United 
Kingdom</td><td>51.42930860</td><td>-0.26840440</td><td>10%</td><td>10</td><td>1</td><td>9</td><td>2</td><td>6</td><td>4</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html" target="_blank">Crowdsourcing facial expressions for affective-interaction</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td><td>megaage</td><td>MegaAge</td><td><a href="papers/d80a3d1f3a438e02a6685e66ee908446766fefa9.html" target="_blank">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td><td>gfw</td><td>YouTube Pose</td><td><a href="papers/e58dd160a76349d46f881bd6ddbc2921f08d1050.html" target="_blank">Merge or Not? Learning to Group Faces via Imitation Learning</a></td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td><td>imdb_face</td><td>IMDB Face</td><td><a href="papers/9e31e77f9543ab42474ba4e9330676e18c242e72.html" target="_blank">The Devil of Face Recognition is in the Noise</a></td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Nanyang Technological University</td><td>Singapore</td><td>1.34841040</td><td>103.68297965</td><td>20%</td><td>5</td><td>1</td><td>4</td><td>0</td><td>3</td><td>1</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html" target="_blank">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html" target="_blank">PETS 2017: Dataset and Challenge</a></td><td><a href="http://tahirnawaz.com/papers/2017_CVPRW_PETS2017Dataset_Luis_Nawaz_Cane_Ferryman.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>8</td><td>0</td><td>2</td><td>6</td></tr><tr><td>377f2b65e6a9300448bdccf678cde59449ecd337</td><td>ufdd</td><td>UFDD</td><td><a href="papers/377f2b65e6a9300448bdccf678cde59449ecd337.html" target="_blank">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html" target="_blank">Face swapping: automatically replacing faces in photographs</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html" target="_blank">FDDB: A benchmark for face detection in unconstrained settings</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>1</td></tr><tr><td>77c81c13a110a341c140995bedb98101b9e84f7f</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/77c81c13a110a341c140995bedb98101b9e84f7f.html" target="_blank">WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>8990cdce3f917dad622e43e033db686b354d057c</td><td>tiny_faces</td><td>TinyFace</td><td><a href="papers/8990cdce3f917dad622e43e033db686b354d057c.html" target="_blank">Low-Resolution Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/9696ad8b164f5e10fcfe23aacf74bd6168aebb15.html" target="_blank">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>2</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html" target="_blank">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Kentucky</td><td>United States</td><td>38.03337420</td><td>-84.50177580</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html" target="_blank">HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</a></td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a 
href="papers/c866a2afc871910e3282fd9498dce4ab20f6a332.html" target="_blank">Surveillance Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html" target="_blank">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td><td>scut_head</td><td>SCUT HEAD</td><td><a href="papers/d3200d49a19a4a4e4e9745ee39649b65d80c834b.html" target="_blank">Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</a></td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td>2018 24th International Conference on Pattern Recognition (ICPR)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4.html" target="_blank">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr></table></body></html>
\ No newline at end of file +<!doctype html><html><head><meta charset='utf-8'><title>Coverage</title><link rel='stylesheet' href='reports.css'></head><body><h2>Coverage</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Country</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html" target="_blank">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>611</td><td>388</td><td>73</td><td>716</td><td>283</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html" target="_blank">Face detection, pose estimation, and landmark localization in the wild</a></td><td><a href="http://crcv.ucf.edu/courses/CAP6412/Spring2013/papers/zhu-ramanan-face-cvpr12.pdf" target="_blank">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>608</td><td>391</td><td>59</td><td>622</td><td>387</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html" target="_blank">Labeled Faces in the Wild : A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>999</td><td>575</td><td>422</td><td>71</td><td>639</td><td>371</td></tr><tr><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td><td>voc</td><td>VOC</td><td><a href="papers/0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a.html" target="_blank">The Pascal Visual Object Classes (VOC) Challenge</a></td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>Oxford University</td><td>United Kingdom</td><td>51.75208490</td><td>-1.25166460</td><td>58%</td><td>999</td><td>575</td><td>424</td><td>35</td><td>613</td><td>414</td></tr><tr><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td><td>coco</td><td>COCO</td><td><a href="papers/5e0f8c355a37a5a89351c02f174e7a5ddcb98683.html" target="_blank">Microsoft COCO: Common Objects in Context</a></td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>999</td><td>569</td><td>430</td><td>29</td><td>799</td><td>193</td></tr><tr><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td><td>a_pascal_yahoo</td><td>#N/A</td><td><a href="papers/2e384f057211426ac5922f1b33d2aa8df5d51f57.html" target="_blank">Describing objects by their attributes</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0468.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United States</td><td>40.11116745</td><td>-88.22587665</td><td>57%</td><td>999</td><td>565</td><td>433</td><td>74</td><td>738</td><td>264</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html" target="_blank">Attribute and simile classifiers for face verification</a></td><td><a href="http://acberg.com/papers/kbbn09iccv.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>894</td><td>544</td><td>350</td><td>56</td><td>604</td><td>300</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html" target="_blank">Deep Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>999</td><td>543</td><td>456</td><td>70</td><td>635</td><td>370</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html" target="_blank">Histograms of oriented gradients for human detection</a></td><td><a href="http://courses.cs.washington.edu/courses/cse576/12sp/notes/CVPR2005_HOG.pdf" target="_blank">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>INRIA Rhone-Alps, Montbonnot, France</td><td>France</td><td>45.21788600</td><td>5.80736900</td><td>54%</td><td>999</td><td>539</td><td>460</td><td>67</td><td>537</td><td>477</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html" target="_blank">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf" target="_blank">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td>edu</td><td>University of Pittsburgh</td><td>United States</td><td>40.44415295</td><td>-79.96243993</td><td>55%</td><td>975</td><td>535</td><td>439</td><td>67</td><td>475</td><td>510</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html" target="_blank">80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</a></td><td><a href="http://cvcl.mit.edu/SUNSeminar/Torralba_80M_PAMI08.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>999</td><td>535</td><td>463</td><td>94</td><td>685</td><td>327</td></tr><tr><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td><td>kitti</td><td>KITTI</td><td><a href="papers/026e3363b7f76b51cc711886597a44d5f1fd1de2.html" target="_blank">Vision meets robotics: The KITTI dataset</a></td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td>I. J. 
Robotics Res.</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>999</td><td>532</td><td>467</td><td>37</td><td>571</td><td>448</td></tr><tr><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/6d96f946aaabc734af7fe3fc4454cf8547fcd5ed.html" target="_blank">The AR face database</a></td><td><span class="gray">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>999</td><td>526</td><td>473</td><td>51</td><td>459</td><td>573</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html" target="_blank">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="https://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf" target="_blank">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>999</td><td>514</td><td>485</td><td>77</td><td>551</td><td>459</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html" target="_blank">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>999</td><td>503</td><td>496</td><td>75</td><td>572</td><td>439</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html" target="_blank">Overview of the face recognition grand challenge</a></td><td><a href="http://ivizlab.sfu.ca/arya/Papers/IEEE/Proceedings/C%20V%20P%20R-%2005/Face%20Recognition%20Grand%20Challenge.pdf" target="_blank">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>NIST</td><td>United States</td><td>39.14004000</td><td>-77.21850600</td><td>50%</td><td>999</td><td>497</td><td>501</td><td>114</td><td>594</td><td>424</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html" target="_blank">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><a href="http://vision.caltech.edu/Image_Datasets/CaltechPedestrians/files/PAMI12pedestrians.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>49%</td><td>999</td><td>485</td><td>514</td><td>71</td><td>541</td><td>464</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html" target="_blank">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine 
Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>999</td><td>484</td><td>515</td><td>110</td><td>525</td><td>485</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html" target="_blank">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>999</td><td>482</td><td>517</td><td>103</td><td>560</td><td>454</td></tr><tr><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/b62628ac06bbac998a3ab825324a41a11bc3a988.html" target="_blank">Xm2vtsdb: the Extended M2vts Database</a></td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>906</td><td>481</td><td>425</td><td>44</td><td>542</td><td>408</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html" target="_blank">VQA: Visual Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.00468.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>731</td><td>444</td><td>287</td><td>47</td><td>629</td><td>96</td></tr><tr><td>dc8b25e35a3acb812beb499844734081722319b4</td><td>feret</td><td>FERET</td><td><a href="papers/dc8b25e35a3acb812beb499844734081722319b4.html" target="_blank">The FERET database and evaluation procedure for face-recognition algorithms</a></td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>999</td><td>443</td><td>556</td><td>106</td><td>606</td><td>413</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html" target="_blank">Parameterisation of a stochastic model for human face identification</a></td><td><a href="https://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>999</td><td>442</td><td>557</td><td>97</td><td>569</td><td>445</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html" target="_blank">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="https://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>848</td><td>422</td><td>426</td><td>55</td><td>420</td><td>433</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html" target="_blank">Deep Learning Face Attributes in the Wild</a></td><td><a href="https://arxiv.org/pdf/1411.7766.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision 
(ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>52%</td><td>808</td><td>421</td><td>386</td><td>68</td><td>670</td><td>118</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html" target="_blank">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="https://arxiv.org/pdf/1604.01685.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>771</td><td>403</td><td>368</td><td>54</td><td>624</td><td>138</td></tr><tr><td>177bc509dd0c7b8d388bb47403f28d6228c14b5c</td><td>celeba_plus</td><td>CelebFaces+</td><td><a href="papers/177bc509dd0c7b8d388bb47403f28d6228c14b5c.html" target="_blank">Deep Learning Face Representation from Predicting 10,000 Classes</a></td><td><a href="http://mmlab.ie.cuhk.edu.hk/pdf/YiSun_CVPR14.pdf" target="_blank">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>660</td><td>397</td><td>263</td><td>25</td><td>340</td><td>330</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database of Human Faces</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>742</td><td>396</td><td>344</td><td>59</td><td>416</td><td>329</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database of Human Faces</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>742</td><td>396</td><td>344</td><td>59</td><td>416</td><td>329</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html" target="_blank">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><a href="http://http.cs.berkeley.edu/Research/Projects/CS/vision/human/poselets_iccv09.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>707</td><td>368</td><td>339</td><td>67</td><td>509</td><td>215</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html" target="_blank">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>584</td><td>329</td><td>255</td><td>38</td><td>338</td><td>245</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfpw</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html" target="_blank">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><a 
href="http://neerajkumar.org/projects/face-parts/base/papers/nk_cvpr2011_faceparts.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>521</td><td>315</td><td>206</td><td>42</td><td>337</td><td>195</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk03</td><td>CUHK03</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html" target="_blank">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf" target="_blank">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>512</td><td>304</td><td>208</td><td>29</td><td>324</td><td>180</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html" target="_blank">Face recognition in unconstrained videos with matched background similarity</a></td><td><a href="http://www.cs.tau.ac.il/thesis/thesis/Maoz.Itay-MSc.Thesis.pdf" target="_blank">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Tel Aviv University</td><td>Israel</td><td>32.11198890</td><td>34.80459702</td><td>60%</td><td>485</td><td>292</td><td>192</td><td>30</td><td>298</td><td>193</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html" target="_blank">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="https://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>586</td><td>290</td><td>294</td><td>48</td><td>345</td><td>244</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>529</td><td>280</td><td>248</td><td>40</td><td>324</td><td>213</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>529</td><td>280</td><td>248</td><td>40</td><td>324</td><td>213</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf" target="_blank">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>529</td><td>280</td><td>248</td><td>40</td><td>324</td><td>213</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html" target="_blank">A 3D facial expression database for facial behavior research</a></td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf" target="_blank">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>555</td><td>263</td><td>291</td><td>47</td><td>299</td><td>270</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html" target="_blank">Pedestrian detection: A benchmark</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1378.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>519</td><td>261</td><td>258</td><td>27</td><td>289</td><td>233</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html" target="_blank">Learning Face Representation from Scratch</a></td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese Academy of 
Sciences</td><td>China</td><td>40.00447950</td><td>116.37023800</td><td>60%</td><td>436</td><td>260</td><td>176</td><td>30</td><td>288</td><td>150</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html" target="_blank">Scalable Person Re-identification: A Benchmark</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Zheng_Scalable_Person_Re-Identification_ICCV_2015_paper.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>394</td><td>238</td><td>156</td><td>18</td><td>272</td><td>116</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html" target="_blank">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="http://facedetection.homepage.t-online.de/downloads/AVBPA01BioID.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>498</td><td>237</td><td>261</td><td>56</td><td>330</td><td>179</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/eccv_2014_deepfacealign.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>383</td><td>231</td><td>152</td><td>25</td><td>265</td><td>121</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/eccv_2014_deepfacealign.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>383</td><td>231</td><td>152</td><td>25</td><td>265</td><td>121</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>424</td><td>225</td><td>198</td><td>26</td><td>239</td><td>190</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>424</td><td>225</td><td>198</td><td>26</td><td>239</td><td>190</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html" target="_blank">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><a 
href="http://ei.is.tuebingen.mpg.de/uploads_file/attachment/attachment/168/andriluka14benchmark.pdf" target="_blank">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>356</td><td>221</td><td>135</td><td>21</td><td>304</td><td>53</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="https://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>373</td><td>215</td><td>157</td><td>35</td><td>251</td><td>129</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="https://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>373</td><td>215</td><td>157</td><td>35</td><td>251</td><td>129</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="https://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>373</td><td>215</td><td>157</td><td>35</td><td>251</td><td>129</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html" target="_blank">Web-based database for facial expression analysis</a></td><td><a href="http://dev.pubs.doc.ic.ac.uk/Pantic-ICME05-2/Pantic-ICME05-2.pdf" target="_blank">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>440</td><td>212</td><td>228</td><td>44</td><td>267</td><td>181</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html" target="_blank">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="http://www.jdl.ac.cn/peal/files/ieee_smc_a_gao_cas-peal.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>415</td><td>209</td><td>206</td><td>39</td><td>189</td><td>232</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html" target="_blank">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>352</td><td>204</td><td>148</td><td>27</td><td>196</td><td>157</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html" target="_blank">Interactive Facial Feature Localization</a></td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>59%</td><td>339</td><td>201</td><td>138</td><td>29</td><td>219</td><td>129</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html" target="_blank">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>324</td><td>199</td><td>125</td><td>29</td><td>211</td><td>118</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html" target="_blank">Robust Face Landmark Estimation under Occlusion</a></td><td><a href="http://authors.library.caltech.edu/45988/1/ICCV13%20Burgos-Artizzu.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>61%</td><td>305</td><td>186</td><td>119</td><td>16</td><td>192</td><td>116</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="https://pdfs.semanticscholar.org/c327/15b5106f46eb6761531704cd2a9b5571832e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>278</td><td>180</td><td>98</td><td>13</td><td>208</td><td>78</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="https://pdfs.semanticscholar.org/c327/15b5106f46eb6761531704cd2a9b5571832e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>278</td><td>180</td><td>98</td><td>13</td><td>208</td><td>78</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html" target="_blank">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><a href="http://gravis.cs.unibas.ch/publications/2009/BFModel09.pdf" target="_blank">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based 
Surveillance</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>323</td><td>176</td><td>147</td><td>29</td><td>226</td><td>98</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html" target="_blank">Depth and Appearance for Mobile Scene Analysis</a></td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07-poster.pdf" target="_blank">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>55%</td><td>319</td><td>176</td><td>143</td><td>27</td><td>195</td><td>127</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html" target="_blank">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://face.cs.kit.edu/befit/workshop2011/pdf/slides/martin_koestinger-slides.pdf" target="_blank">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>292</td><td>175</td><td>117</td><td>37</td><td>212</td><td>84</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html" target="_blank">Presentation and validation of the Radboud Faces Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>446</td><td>175</td><td>271</td><td>43</td><td>322</td><td>136</td></tr><tr><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td><td>mot</td><td>MOT</td><td><a href="papers/5981e6479c3fd4e31644db35d236bfb84ae46514.html" target="_blank">Learning to associate: HybridBoosted multi-target tracker for crowded scene</a></td><td><a href="http://iris.usc.edu/Outlines/papers/2009/yuan-chang-nevatia-cvpr09.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of Southern California</td><td>United States</td><td>34.02241490</td><td>-118.28634407</td><td>52%</td><td>330</td><td>172</td><td>157</td><td>27</td><td>196</td><td>139</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://videolectures.net/site/normal_dl/tag=81522/cvpr2010_andriluka_m3de_01.pdf" target="_blank">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>54%</td><td>302</td><td>164</td><td>138</td><td>34</td><td>207</td><td>100</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://videolectures.net/site/normal_dl/tag=81522/cvpr2010_andriluka_m3de_01.pdf" target="_blank">[pdf]</a></td><td>2010 IEEE Computer 
Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>54%</td><td>302</td><td>164</td><td>138</td><td>34</td><td>207</td><td>100</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk01</td><td>CUHK01</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html" target="_blank">Human Reidentification with Transferred Metric Learning</a></td><td><a href="https://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>258</td><td>160</td><td>98</td><td>12</td><td>142</td><td>115</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html" target="_blank">Bosphorus Database for 3D Face Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>328</td><td>158</td><td>170</td><td>19</td><td>149</td><td>183</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html" target="_blank">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><a href="http://static.cs.brown.edu/~gen/pub_papers/SUN_Attribute_Database_CVPR2012.pdf" target="_blank">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Brown University</td><td>United States</td><td>41.82686820</td><td>-71.40123146</td><td>58%</td><td>269</td><td>156</td><td>113</td><td>29</td><td>215</td><td>57</td></tr><tr><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/010f0f4929e6a6644fb01f0e43820f91d0fad292.html" target="_blank">YFCC100M: the new data in multimedia research</a></td><td><a href="https://arxiv.org/pdf/1503.01817.pdf" target="_blank">[pdf]</a></td><td>Commun. ACM</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>56%</td><td>276</td><td>155</td><td>121</td><td>23</td><td>175</td><td>99</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html" target="_blank">Names and faces in the news</a></td><td><a href="http://ttic.uchicago.edu/~mmaire/papers/pdf/names_faces_cvpr2004.pdf" target="_blank">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>294</td><td>150</td><td>143</td><td>29</td><td>215</td><td>82</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>towncenter</td><td>TownCenter</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html" target="_blank">Stable multi-target tracking in real-time surveillance video</a></td><td><a href="http://ben.benfold.com/docs/benfold_reid_cvpr2011-preprint.pdf" target="_blank">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>310</td><td>137</td><td>173</td><td>24</td><td>180</td><td>131</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html" target="_blank">Generic object recognition with boosting</a></td><td><a href="http://www.cse.unr.edu/~bebis/CS773C/ObjectRecognition/Papers/Opelt06.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>48%</td><td>286</td><td>136</td><td>150</td><td>16</td><td>193</td><td>97</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html" target="_blank">Recognition using visual phrases</a></td><td><a href="http://vision.cs.uiuc.edu/phrasal/recognition_using_visual_phrases.pdf" target="_blank">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United States</td><td>40.11116745</td><td>-88.22587665</td><td>58%</td><td>233</td><td>135</td><td>98</td><td>18</td><td>177</td><td>58</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk02</td><td>CUHK02</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html" target="_blank">Locally Aligned Feature Transforms across Views</a></td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>242</td><td>129</td><td>113</td><td>17</td><td>139</td><td>102</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>bpad</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html" target="_blank">Describing people: A poselet-based approach to attribute classification</a></td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf" target="_blank">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>221</td><td>125</td><td>96</td><td>14</td><td>165</td><td>59</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html" target="_blank">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>218</td><td>125</td><td>92</td><td>17</td><td>152</td><td>71</td></tr><tr><td>0c91808994a250d7be332400a534a9291ca3b60e</td><td>graz</td><td>Graz Pedestrian</td><td><a 
href="papers/0c91808994a250d7be332400a534a9291ca3b60e.html" target="_blank">Weak Hypotheses and Boosting for Generic Object Detection and Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>247</td><td>125</td><td>122</td><td>18</td><td>177</td><td>78</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html" target="_blank">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><a href="http://biometrics.cse.msu.edu/Publications/Face/Klareetal_UnconstrainedFaceDetectionRecognitionJanus_CVPR15.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>222</td><td>123</td><td>99</td><td>19</td><td>161</td><td>62</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1454.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>217</td><td>121</td><td>96</td><td>14</td><td>133</td><td>86</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/1454.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>217</td><td>121</td><td>96</td><td>14</td><td>133</td><td>86</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>ilids_vid_reid</td><td>iLIDS-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>210</td><td>120</td><td>90</td><td>10</td><td>115</td><td>94</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>210</td><td>120</td><td>90</td><td>10</td><td>115</td><td>94</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html" target="_blank">Exploring Models and Data for Image Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>191</td><td>115</td><td>76</td><td>12</td><td>165</td><td>27</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html" target="_blank">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>198</td><td>114</td><td>84</td><td>16</td><td>111</td><td>88</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html" target="_blank">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>185</td><td>111</td><td>74</td><td>15</td><td>124</td><td>64</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html" target="_blank">Learning effective human pose estimation from inaccurate annotation</a></td><td><a href="http://www.comp.leeds.ac.uk/mat4saj/publications/johnson11cvpr.pdf" target="_blank">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Leeds</td><td>United Kingdom</td><td>53.80387185</td><td>-1.55245712</td><td>64%</td><td>173</td><td>111</td><td>62</td><td>10</td><td>122</td><td>56</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html" target="_blank">Understanding images of groups of people</a></td><td><a href="http://chenlab.ece.cornell.edu/people/Andy/Andy_files/cvpr09.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>54%</td><td>202</td><td>110</td><td>92</td><td>12</td><td>132</td><td>75</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html" target="_blank">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><a href="http://users.cecs.anu.edu.au/~adhall/Dhall_Goecke_Lucey_Gedeon_M_2012.pdf" target="_blank">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>Australia</td><td>-35.27769990</td><td>149.11852700</td><td>60%</td><td>182</td><td>109</td><td>73</td><td>8</td><td>86</td><td>99</td></tr><tr><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/570f37ed63142312e6ccdf00ecc376341ec72b9f.html" target="_blank">Social LSTM: Human Trajectory Prediction in Crowded Spaces</a></td><td><a href="http://cs.stanford.edu/groups/vision/pdf/CVPR16_N_LSTM.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition 
(CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>229</td><td>106</td><td>123</td><td>5</td><td>150</td><td>79</td></tr><tr><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/22ad2c8c0f4d6aa4328b38d894b814ec22579761.html" target="_blank">Clothing cosegmentation for recognizing people</a></td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/2670CVPR08Gallagher.pdf" target="_blank">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>37.41021930</td><td>-122.05965487</td><td>58%</td><td>177</td><td>103</td><td>74</td><td>7</td><td>101</td><td>84</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html" target="_blank">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><a href="http://michaelryoo.com/papers/cvpr2013_ryoo.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>148</td><td>103</td><td>45</td><td>8</td><td>111</td><td>38</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw_a</td><td>#N/A</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html" target="_blank">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/jpatchlbp.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>177</td><td>98</td><td>79</td><td>15</td><td>104</td><td>75</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html" target="_blank">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~lz013/papers/deepfashion_poster.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>150</td><td>97</td><td>53</td><td>4</td><td>111</td><td>38</td></tr><tr><td>6204776d31359d129a582057c2d788a14f8aadeb</td><td>youtube_celebrities</td><td>YouTube Celebrities</td><td><a href="papers/6204776d31359d129a582057c2d788a14f8aadeb.html" target="_blank">Face tracking and recognition with visual constraints in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>32%</td><td>301</td><td>97</td><td>202</td><td>18</td><td>144</td><td>133</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html" target="_blank">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><a href="http://www.iainm.com/iainm/Publications_files/2011_PAINFUL.pdf" target="_blank">[pdf]</a></td><td>Face and Gesture 2011</td><td>edu</td><td>Carnegie Mellon University</td><td>United 
States</td><td>37.41021930</td><td>-122.05965487</td><td>52%</td><td>184</td><td>95</td><td>89</td><td>23</td><td>112</td><td>71</td></tr><tr><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td><td>disfa</td><td>DISFA</td><td><a href="papers/5a5f0287484f0d480fed1ce585dbf729586f0edc.html" target="_blank">DISFA: A Spontaneous Facial Action Intensity Database</a></td><td><a href="http://mohammadmahoor.com/wp-content/uploads/2017/06/DiSFA_Paper_andAppendix_Final_OneColumn1-1.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Affective Computing</td><td>edu</td><td>University of Denver</td><td>United States</td><td>39.67665410</td><td>-104.96220300</td><td>49%</td><td>190</td><td>94</td><td>96</td><td>19</td><td>100</td><td>91</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html" target="_blank">Age and Gender Estimation of Unfiltered Faces</a></td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>55%</td><td>168</td><td>92</td><td>76</td><td>5</td><td>94</td><td>78</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html" target="_blank">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>167</td><td>91</td><td>76</td><td>14</td><td>131</td><td>36</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html" target="_blank">Appearance-based gaze estimation in the wild</a></td><td><a href="https://arxiv.org/pdf/1504.02863.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>138</td><td>90</td><td>48</td><td>3</td><td>97</td><td>42</td></tr><tr><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td><td>scface</td><td>SCface</td><td><a href="papers/29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d.html" target="_blank">SCface – surveillance cameras face database</a></td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td>Multimedia Tools and Applications</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>178</td><td>90</td><td>88</td><td>15</td><td>90</td><td>89</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html" target="_blank">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><a href="http://crcv-web.eecs.ucf.edu/papers/cvpr2013/Counting_V3o.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>125</td><td>88</td><td>37</td><td>6</td><td>73</td><td>52</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html" 
target="_blank">Multi-camera activity correlation analysis</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0163.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>62%</td><td>138</td><td>86</td><td>52</td><td>8</td><td>79</td><td>61</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html" target="_blank">On a Large Sequence-Based Human Gait Database</a></td><td><a href="https://eprints.soton.ac.uk/257901/1/Shutler_2002.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>148</td><td>86</td><td>62</td><td>17</td><td>104</td><td>49</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html" target="_blank">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="http://liangzheng.org/1320.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>146</td><td>85</td><td>61</td><td>6</td><td>97</td><td>49</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html" target="_blank">WIDER FACE: A Face Detection Benchmark</a></td><td><a href="https://arxiv.org/pdf/1511.06523.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>57%</td><td>148</td><td>85</td><td>63</td><td>15</td><td>108</td><td>41</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>136</td><td>79</td><td>57</td><td>7</td><td>108</td><td>27</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>136</td><td>79</td><td>57</td><td>7</td><td>108</td><td>27</td></tr><tr><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td><td>fei</td><td>FEI</td><td><a href="papers/8b56e33f33e582f3e473dba573a16b598ed9bcdc.html" target="_blank">A new ranking method for principal components analysis and its application to face image analysis</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>169</td><td>78</td><td>91</td><td>6</td><td>72</td><td>101</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html" target="_blank">A 
high-resolution spontaneous 3D dynamic facial expression database</a></td><td><a href="http://www.csee.usf.edu/~scanavan/papers/FG2013.pdf" target="_blank">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>United States</td><td>42.08779975</td><td>-75.97066066</td><td>51%</td><td>151</td><td>77</td><td>74</td><td>7</td><td>87</td><td>65</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html" target="_blank">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><a href="http://vc.cs.nthu.edu.tw/home/paper/codfiles/htchiang/201212250411/newp12.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>151</td><td>74</td><td>77</td><td>9</td><td>79</td><td>73</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html" target="_blank">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>53%</td><td>133</td><td>71</td><td>62</td><td>13</td><td>94</td><td>41</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html" target="_blank">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Massachusetts</td><td>United States</td><td>42.38897850</td><td>-72.52869870</td><td>58%</td><td>123</td><td>71</td><td>52</td><td>3</td><td>72</td><td>50</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html" target="_blank">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><a href="https://arxiv.org/pdf/1512.00596.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>121</td><td>71</td><td>50</td><td>9</td><td>98</td><td>22</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html" target="_blank">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>112</td><td>70</td><td>42</td><td>14</td><td>84</td><td>29</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html" target="_blank">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><a 
href="https://arxiv.org/pdf/1304.0869.pdf" target="_blank">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>128</td><td>68</td><td>60</td><td>6</td><td>73</td><td>60</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html" target="_blank">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_2015_workshops/w11/papers/Rothe_DEX_Deep_EXpectation_ICCV_2015_paper.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>120</td><td>67</td><td>53</td><td>5</td><td>74</td><td>47</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html" target="_blank">A data-driven approach to cleaning large face datasets</a></td><td><a href="http://stefan.winkler.net/Publications/icip2014a.pdf" target="_blank">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>123</td><td>66</td><td>57</td><td>4</td><td>96</td><td>27</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html" target="_blank">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>114</td><td>61</td><td>53</td><td>10</td><td>71</td><td>43</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html" target="_blank">Hipster wars: Discovering elements of fashion styles</a></td><td><a href="http://acberg.com/papers/hipster_eccv14.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>91</td><td>60</td><td>31</td><td>5</td><td>61</td><td>29</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>110</td><td>60</td><td>50</td><td>12</td><td>69</td><td>43</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="https://arxiv.org/pdf/1408.3967.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>110</td><td>60</td><td>50</td><td>12</td><td>69</td><td>43</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html" 
target="_blank">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>122</td><td>59</td><td>63</td><td>11</td><td>71</td><td>51</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html" target="_blank">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>96</td><td>55</td><td>41</td><td>2</td><td>34</td><td>63</td></tr><tr><td>9a9877791945c6fa4c1743ec6d3fb32570ef8481</td><td>m2vts</td><td>m2vts</td><td><a href="papers/9a9877791945c6fa4c1743ec6d3fb32570ef8481.html" target="_blank">The M2VTS Multimodal Face Database (Release 1.00)</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Laboratoire de Télécommunications et Télédétection, UCL, Louvain-La-Neuve, Belgium</td><td>Belgium</td><td>50.66968750</td><td>4.61559090</td><td>43%</td><td>129</td><td>55</td><td>74</td><td>4</td><td>80</td><td>54</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html" target="_blank">Pedestrian Attribute Recognition At Far Distance</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>80</td><td>54</td><td>26</td><td>2</td><td>51</td><td>28</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html" target="_blank">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><a href="http://openaccess.thecvf.com/content_iccv_2015/papers/Liu_A_Spatio-Temporal_Appearance_ICCV_2015_paper.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>85</td><td>53</td><td>32</td><td>9</td><td>51</td><td>34</td></tr><tr><td>06f02199690961ba52997cde1527e714d2b3bf8f</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/06f02199690961ba52997cde1527e714d2b3bf8f.html" target="_blank">Gaze locking: passive eye contact detection for human-object interaction</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>64%</td><td>80</td><td>51</td><td>29</td><td>0</td><td>49</td><td>35</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html" target="_blank">We are family: joint pose estimation of multiple persons</a></td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>77</td><td>51</td><td>26</td><td>5</td><td>60</td><td>19</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html" target="_blank">Object Detection Combining Recognition and Segmentation</a></td><td><a 
href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>101</td><td>50</td><td>51</td><td>11</td><td>58</td><td>42</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html" target="_blank">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>112</td><td>49</td><td>63</td><td>11</td><td>79</td><td>35</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html" target="_blank">High Five: Recognising human interactions in TV shows</a></td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>91</td><td>47</td><td>44</td><td>11</td><td>64</td><td>27</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html" target="_blank">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><a href="http://www.eurecom.fr/fr/publication/4393/download/mm-publi-4393.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td>edu</td><td>University of North Carolina at Chapel Hill</td><td>United States</td><td>35.91139710</td><td>-79.05045290</td><td>61%</td><td>75</td><td>46</td><td>29</td><td>6</td><td>26</td><td>50</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html" target="_blank">Violent flows: Real-time detection of violent crowd behavior</a></td><td><a href="http://www.openu.ac.il/home/hassner/data/violentflows/violent_flows.pdf" target="_blank">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>55%</td><td>83</td><td>46</td><td>37</td><td>6</td><td>44</td><td>41</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html" target="_blank">Labeled Faces in the Wild : A Survey</a></td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Stevens Institute of Technology</td><td>United States</td><td>40.74225200</td><td>-74.02709490</td><td>45%</td><td>99</td><td>45</td><td>54</td><td>8</td><td>63</td><td>36</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html" target="_blank">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer 
Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>90</td><td>45</td><td>45</td><td>5</td><td>60</td><td>31</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html" target="_blank">Automatic 3D face authentication</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>95</td><td>44</td><td>51</td><td>8</td><td>61</td><td>35</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html" target="_blank">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><a href="https://arxiv.org/pdf/1502.00739.pdf" target="_blank">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>60</td><td>42</td><td>18</td><td>0</td><td>38</td><td>24</td></tr><tr><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td><td>put_face</td><td>Put Face</td><td><a href="papers/ae0aee03d946efffdc7af2362a42d3750e7dd48a.html" target="_blank">The put face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>100</td><td>42</td><td>58</td><td>7</td><td>56</td><td>48</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html" target="_blank">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>49%</td><td>83</td><td>41</td><td>42</td><td>6</td><td>51</td><td>33</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html" target="_blank">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>83</td><td>40</td><td>43</td><td>2</td><td>63</td><td>19</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html" target="_blank">Pruning training sets for learning of object categories</a></td><td><a href="http://authors.library.caltech.edu/11469/1/ANGcvpr05.pdf" target="_blank">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>60</td><td>39</td><td>21</td><td>5</td><td>43</td><td>17</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html" target="_blank">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><a 
href="http://cbcsl.ece.ohio-state.edu/cvpr16.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>72</td><td>39</td><td>33</td><td>7</td><td>54</td><td>17</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html" target="_blank">Kinship verification through transfer learning</a></td><td><a href="http://ijcai.org/Proceedings/11/Papers/422.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>71</td><td>39</td><td>32</td><td>2</td><td>29</td><td>43</td></tr><tr><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td><td>feret</td><td>FERET</td><td><a href="papers/31de9b3dd6106ce6eec9a35991b2b9083395fd0b.html" target="_blank">FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</a></td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>82</td><td>38</td><td>44</td><td>5</td><td>62</td><td>20</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html" target="_blank">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2016/papers/Niu_Ordinal_Regression_With_CVPR_2016_paper.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>68</td><td>36</td><td>32</td><td>8</td><td>49</td><td>17</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html" target="_blank">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><a href="https://arxiv.org/pdf/1501.05703.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>72%</td><td>50</td><td>36</td><td>13</td><td>2</td><td>40</td><td>9</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html" target="_blank">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><a href="http://affect.media.mit.edu/pdfs/13.McDuff-etal-AMFED.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>73</td><td>34</td><td>39</td><td>6</td><td>41</td><td>34</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><a href="http://www.cse.msu.edu/~rossarun/pubs/ChenMakeupDetection_ICB2013.pdf" target="_blank">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United 
States</td><td>39.65404635</td><td>-79.96475355</td><td>74%</td><td>46</td><td>34</td><td>12</td><td>1</td><td>18</td><td>28</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><a href="http://www.cse.msu.edu/~rossarun/pubs/ChenMakeupDetection_ICB2013.pdf" target="_blank">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>74%</td><td>46</td><td>34</td><td>12</td><td>1</td><td>18</td><td>28</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html" target="_blank">Person Re-identification in the Wild</a></td><td><a href="https://arxiv.org/pdf/1604.02531.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>65</td><td>33</td><td>32</td><td>1</td><td>46</td><td>17</td></tr><tr><td>5ffd74d2873b7cba2cbc5fd295cc7fbdedca22a2</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/5ffd74d2873b7cba2cbc5fd295cc7fbdedca22a2.html" target="_blank">The Cityscapes Dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>54</td><td>32</td><td>22</td><td>3</td><td>40</td><td>14</td></tr><tr><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td><td>nudedetection</td><td>Nude Detection</td><td><a href="papers/7ace44190729927e5cb0dd5d363fcae966fe13f7.html" target="_blank">A bag-of-features approach based on Hue-SIFT descriptor for nude detection</a></td><td><a href="http://www.eurasip.org/Proceedings/Eusipco/Eusipco2009/contents/papers/1569191772.pdf" target="_blank">[pdf]</a></td><td>2009 17th European Signal Processing Conference</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>51</td><td>31</td><td>20</td><td>1</td><td>18</td><td>33</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html" target="_blank">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf" target="_blank">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>52</td><td>30</td><td>22</td><td>5</td><td>37</td><td>15</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html" target="_blank">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><a href="http://allenai.org/content/publications/SituationRecognition.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>48</td><td>30</td><td>18</td><td>2</td><td>46</td><td>2</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face 
recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>61%</td><td>49</td><td>30</td><td>19</td><td>0</td><td>18</td><td>31</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf" target="_blank">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>61%</td><td>49</td><td>30</td><td>19</td><td>0</td><td>18</td><td>31</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="http://klab.tch.harvard.edu/academia/classes/Neuro230/2012/lectures/Lecture_11_Reading.pdf" target="_blank">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>58%</td><td>50</td><td>29</td><td>21</td><td>3</td><td>39</td><td>11</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html" target="_blank">The MUG facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>Greece</td><td>40.62984145</td><td>22.95889350</td><td>43%</td><td>68</td><td>29</td><td>39</td><td>5</td><td>28</td><td>40</td></tr><tr><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/18858cc936947fc96b5c06bbe3c6c2faa5614540.html" target="_blank">Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</a></td><td><a href="https://pdfs.semanticscholar.org/03c1/fc9c3339813ed81ad0de540132f9f695a0f8.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>55</td><td>29</td><td>26</td><td>0</td><td>47</td><td>7</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="http://klab.tch.harvard.edu/academia/classes/Neuro230/2012/lectures/Lecture_11_Reading.pdf" target="_blank">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United 
States</td><td>42.36782045</td><td>-71.12666653</td><td>58%</td><td>50</td><td>29</td><td>21</td><td>3</td><td>39</td><td>11</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html" target="_blank">Recognize complex events from static images by fusing deep channels</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2015/supplemental/Xiong_Recognize_Complex_Events_2015_CVPR_supplemental.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>45</td><td>29</td><td>16</td><td>1</td><td>30</td><td>15</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html" target="_blank">UMB-DB: A database of partially occluded 3D faces</a></td><td><a href="http://face.cs.kit.edu/befit/workshop2011/pdf/slides/claudio_cusano-slides.pdf" target="_blank">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>45</td><td>27</td><td>18</td><td>2</td><td>20</td><td>24</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html" target="_blank">Automated Human Identification Using Ear Imaging</a></td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>37%</td><td>70</td><td>26</td><td>44</td><td>6</td><td>28</td><td>42</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html" target="_blank">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf" target="_blank">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>58</td><td>26</td><td>32</td><td>7</td><td>41</td><td>18</td></tr><tr><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td><td>pornodb</td><td>#N/A</td><td><a href="papers/b92a1ed9622b8268ae3ac9090e25789fc41cc9b8.html" target="_blank">Pooling in image representation: The visual codeword point of view</a></td><td><a href="http://cedric.cnam.fr/~thomen/papers/avila_CVIU2012_final.pdf" target="_blank">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>32%</td><td>77</td><td>25</td><td>52</td><td>7</td><td>46</td><td>34</td></tr><tr><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/eb027969f9310e0ae941e2adee2d42cdf07d938c.html" target="_blank">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td>edu</td><td>Oxford University</td><td>United 
Kingdom</td><td>51.75208490</td><td>-1.25166460</td><td>45%</td><td>56</td><td>25</td><td>31</td><td>6</td><td>50</td><td>6</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html" target="_blank">The intrinsic memorability of face photographs.</a></td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td>Journal of experimental psychology. General</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>47</td><td>24</td><td>23</td><td>3</td><td>34</td><td>13</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html" target="_blank">The CMU Face In Action (FIA) Database</a></td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>55</td><td>24</td><td>31</td><td>5</td><td>41</td><td>17</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html" target="_blank">Texas 3D Face Recognition Database</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ssiai_may10.pdf" target="_blank">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>61</td><td>24</td><td>37</td><td>3</td><td>37</td><td>25</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html" target="_blank">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><a href="http://openaccess.thecvf.com/content_ICCV_2017/papers/Neuhold_The_Mapillary_Vistas_ICCV_2017_paper.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>44</td><td>23</td><td>21</td><td>0</td><td>36</td><td>7</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html" target="_blank">Consistent Re-identification in a Camera Network</a></td><td><a href="http://cs-people.bu.edu/dasabir/papers/ECCV14_Poster.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>45</td><td>23</td><td>22</td><td>7</td><td>34</td><td>11</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html" target="_blank">End-to-End Deep Learning for Person Search</a></td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>41</td><td>22</td><td>19</td><td>2</td><td>27</td><td>12</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html" target="_blank">Re-identify people in wide area camera network</a></td><td><a 
href="http://users.dimi.uniud.it/~niki.martinel/data/publications/2012/CVPR/MarMicCVPR2012.pdf" target="_blank">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>38%</td><td>55</td><td>21</td><td>34</td><td>2</td><td>35</td><td>19</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html" target="_blank">Personalizing Human Video Pose Estimation</a></td><td><a href="https://arxiv.org/pdf/1511.06676.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Oxford University</td><td>United Kingdom</td><td>51.75208490</td><td>-1.25166460</td><td>66%</td><td>32</td><td>21</td><td>11</td><td>2</td><td>29</td><td>5</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/758d7e1be64cc668c59ef33ba8882c8597406e53.html" target="_blank">AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</a></td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>38</td><td>20</td><td>18</td><td>1</td><td>26</td><td>11</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html" target="_blank">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>40</td><td>20</td><td>20</td><td>0</td><td>21</td><td>20</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html" target="_blank">UMDFaces: An annotated face dataset for training deep networks</a></td><td><a href="https://arxiv.org/pdf/1611.01484.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td>edu</td><td>University of Maryland</td><td>United States</td><td>39.28996850</td><td>-76.62196103</td><td>57%</td><td>35</td><td>20</td><td>15</td><td>4</td><td>28</td><td>7</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html" target="_blank">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>39</td><td>19</td><td>20</td><td>2</td><td>27</td><td>12</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html" target="_blank">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer 
Vision</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>25</td><td>19</td><td>6</td><td>1</td><td>19</td><td>7</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cbsr.ia.ac.cn/english/APiS_1.0_paper.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>26</td><td>18</td><td>8</td><td>1</td><td>13</td><td>13</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html" target="_blank">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><a href="https://arxiv.org/pdf/1608.01041.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>29</td><td>18</td><td>11</td><td>0</td><td>15</td><td>14</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html" target="_blank">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td>edu</td><td>University of Notre Dame</td><td>United States</td><td>41.70456775</td><td>-86.23822026</td><td>56%</td><td>32</td><td>18</td><td>14</td><td>3</td><td>17</td><td>15</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cbsr.ia.ac.cn/english/APiS_1.0_paper.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>26</td><td>18</td><td>8</td><td>1</td><td>13</td><td>13</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html" target="_blank">Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</a></td><td><a href="https://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>44</td><td>17</td><td>27</td><td>1</td><td>21</td><td>23</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html" target="_blank">Fashion Landmark Detection in the Wild</a></td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>1</td><td>17</td><td>9</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html" target="_blank">Ear Recognition: More Than a Survey</a></td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" 
target="_blank">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>24</td><td>16</td><td>8</td><td>0</td><td>11</td><td>13</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>CAFE</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html" target="_blank">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>33</td><td>16</td><td>17</td><td>3</td><td>28</td><td>5</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html" target="_blank">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><a href="http://openaccess.thecvf.com/content_iccv_2015/papers/Chu_Multi-Task_Recurrent_Neural_ICCV_2015_paper.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>25</td><td>16</td><td>9</td><td>2</td><td>21</td><td>5</td></tr><tr><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/a5a44a32a91474f00a3cda671a802e87c899fbb4.html" target="_blank">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>25</td><td>16</td><td>9</td><td>2</td><td>25</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><a href="https://arxiv.org/pdf/1511.07917.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>27</td><td>15</td><td>12</td><td>1</td><td>23</td><td>5</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html" target="_blank">Describing Common Human Visual Actions in Images</a></td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>26</td><td>15</td><td>11</td><td>0</td><td>25</td><td>1</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><a href="https://arxiv.org/pdf/1511.07917.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>27</td><td>15</td><td>12</td><td>1</td><td>23</td><td>5</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html" target="_blank">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1705.00393.pdf" 
target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>United States</td><td>47.65432380</td><td>-122.30800894</td><td>39%</td><td>38</td><td>15</td><td>23</td><td>2</td><td>29</td><td>8</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html" target="_blank">Genealogical face recognition based on UB KinFace database</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>United States</td><td>42.93362780</td><td>-78.88394479</td><td>47%</td><td>30</td><td>14</td><td>16</td><td>1</td><td>10</td><td>21</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html" target="_blank">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>21</td><td>14</td><td>7</td><td>0</td><td>18</td><td>3</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html" target="_blank">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>18</td><td>14</td><td>4</td><td>0</td><td>16</td><td>2</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html" target="_blank">Merging Pose Estimates Across Space and Time</a></td><td><a href="https://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>87%</td><td>15</td><td>13</td><td>2</td><td>0</td><td>12</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>4</td></tr><tr><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td><td>buhmap_db</td><td>BUHMAP-DB </td><td><a 
href="papers/014b8df0180f33b9fea98f34ae611c6447d761d2.html" target="_blank">Facial feature tracking and expression recognition for sign language</a></td><td><a href="https://www.cmpe.boun.edu.tr/~ari/files/ari2008iscis.pdf" target="_blank">[pdf]</a></td><td>2009 IEEE 17th Signal Processing and Communications Applications Conference</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>25</td><td>12</td><td>13</td><td>1</td><td>11</td><td>15</td></tr><tr><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/12ad3b5bbbf407f8e54ea692c07633d1a867c566.html" target="_blank">Object recognition using segmentation for feature detection</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 17th International Conference on Pattern Recognition, 2004. ICPR 2004.</td><td>edu</td><td>Inst. of Comput. Sci., Univ. of Leoben, Austria</td><td>Austria</td><td>47.38473720</td><td>15.09302010</td><td>41%</td><td>29</td><td>12</td><td>17</td><td>1</td><td>21</td><td>8</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html" target="_blank">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td></td><td>38%</td><td>32</td><td>12</td><td>20</td><td>2</td><td>18</td><td>15</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html" target="_blank">Fine-grained evaluation on face detection in the wild</a></td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf" target="_blank">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>13</td><td>4</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html" target="_blank">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>41%</td><td>29</td><td>12</td><td>17</td><td>3</td><td>17</td><td>12</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html" target="_blank">Three-dimensional face recognition: an eigensurface approach</a></td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf" target="_blank">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. 
ICIP '04.</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>36</td><td>12</td><td>24</td><td>4</td><td>25</td><td>11</td></tr><tr><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/a8d0b149c2eadaa02204d3e4356fbc8eccf3b315.html" target="_blank">Hi4D-ADSIP 3-D dynamic facial articulation database</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>18</td><td>11</td><td>7</td><td>1</td><td>7</td><td>11</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html" target="_blank">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="https://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>17</td><td>11</td><td>6</td><td>1</td><td>12</td><td>5</td></tr><tr><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td><td>agedb</td><td>AgeDB</td><td><a href="papers/6dcf418c778f528b5792104760f1fbfe90c6dd6a.html" target="_blank">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/agedb.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>91%</td><td>11</td><td>10</td><td>1</td><td>0</td><td>10</td><td>1</td></tr><tr><td>ec792ad2433b6579f2566c932ee414111e194537</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/ec792ad2433b6579f2566c932ee414111e194537.html" target="_blank">Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</a></td><td><a href="https://arxiv.org/pdf/1711.08565.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>14</td><td>10</td><td>4</td><td>1</td><td>11</td><td>3</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html" target="_blank">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>20</td><td>9</td><td>11</td><td>2</td><td>17</td><td>4</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html" target="_blank">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>21</td><td>9</td><td>12</td><td>3</td><td>11</td><td>10</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html" target="_blank">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on 
Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>United States</td><td>34.22498270</td><td>-77.86907744</td><td>69%</td><td>13</td><td>9</td><td>4</td><td>0</td><td>6</td><td>8</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html" target="_blank">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><a href="http://cdn.iiit.ac.in/cdn/cvit.iiit.ac.in/papers/Shankar2013Indian.pdf" target="_blank">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td>edu</td><td>CVIT, IIITH, India</td><td>India</td><td>17.44595810</td><td>78.34959940</td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>10</td><td>5</td></tr><tr><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td><td>umd_faces</td><td>UMD</td><td><a href="papers/71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6.html" target="_blank">The Do’s and Don’ts for CNN-Based Face Verification</a></td><td><a href="https://arxiv.org/pdf/1705.07426.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision Workshops (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>25</td><td>9</td><td>16</td><td>3</td><td>17</td><td>6</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html" target="_blank">Recognizing disguised faces</a></td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>24</td><td>8</td><td>16</td><td>0</td><td>18</td><td>6</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html" target="_blank">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><span class="gray">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>Canada</td><td>45.50397610</td><td>-73.57496870</td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>13</td><td>7</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html" target="_blank">Learning Social Relation Traits from Face Images</a></td><td><a href="https://arxiv.org/pdf/1509.03936.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>20</td><td>8</td><td>12</td><td>5</td><td>15</td><td>5</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html" target="_blank">YawDD: a yawning detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>14</td><td>8</td><td>6</td><td>1</td><td>2</td><td>12</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html" target="_blank">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a 
href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>15</td><td>7</td><td>8</td><td>1</td><td>10</td><td>4</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html" target="_blank">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</a></td><td><a href="https://arxiv.org/pdf/1605.09653.pdf" target="_blank">[pdf]</a></td><td>IEEE transactions on pattern analysis and machine intelligence</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>15</td><td>7</td><td>8</td><td>1</td><td>10</td><td>5</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html" target="_blank">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>9</td><td>7</td><td>2</td><td>0</td><td>5</td><td>4</td></tr><tr><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2b926b3586399d028b46315d7d9fb9d879e4f79c.html" target="_blank">Multimodal 2D, 2.5D & 3D Face Verification</a></td><td><a href="http://www.researchgate.net/profile/Enrique_Cabello/publication/224057733_Multimodal_2D_2.5D__3D_Face_Verification/links/0912f50f522298fa95000000.pdf" target="_blank">[pdf]</a></td><td>2006 International Conference on Image Processing</td><td>edu</td><td>Universidad Rey Juan Carlos, Spain</td><td></td><td>40.33586610</td><td>-3.87694320</td><td>50%</td><td>14</td><td>7</td><td>7</td><td>0</td><td>2</td><td>12</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html" target="_blank">Iranian Face Database with age, pose and expression</a></td><td><a href="http://www.iranprc.org/pdf/paper/2007-02.pdf" target="_blank">[pdf]</a></td><td>2007 International Conference on Machine Vision</td><td>edu</td><td>Islamic Azad University</td><td>Iran</td><td>34.84529990</td><td>48.55962120</td><td>35%</td><td>20</td><td>7</td><td>13</td><td>2</td><td>12</td><td>9</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html" target="_blank">IARPA Janus Benchmark-B Face Dataset</a></td><td><a href="http://biometrics.cse.msu.edu/Publications/Face/Whitelametal_IARPAJanusBenchmark-BFaceDataset_CVPRW17.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Michigan State University</td><td>United States</td><td>42.71856800</td><td>-84.47791571</td><td>28%</td><td>25</td><td>7</td><td>18</td><td>6</td><td>21</td><td>4</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html" target="_blank">VADANA: A dense dataset for facial image analysis</a></td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf" target="_blank">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV 
Workshops)</td><td>edu</td><td>University of Delaware</td><td>United States</td><td>39.68103280</td><td>-75.75401840</td><td>44%</td><td>16</td><td>7</td><td>9</td><td>0</td><td>6</td><td>10</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html" target="_blank">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/ECCV_2014/workshops/w19/11%20-%20The%20HDA%20data%20set%20for%20research%20on%20fully.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>35%</td><td>17</td><td>6</td><td>11</td><td>2</td><td>11</td><td>6</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html" target="_blank">Competitive affective gaming: winning with a smile</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>Portugal</td><td>38.66096400</td><td>-9.20581300</td><td>75%</td><td>8</td><td>6</td><td>2</td><td>0</td><td>4</td><td>4</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html" target="_blank">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><a href="https://arxiv.org/pdf/1511.02459.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>14</td><td>6</td><td>8</td><td>3</td><td>5</td><td>9</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html" target="_blank">How to Take a Good Selfie?</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>9</td><td>6</td><td>3</td><td>0</td><td>6</td><td>4</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html" target="_blank">ChaLearn looking at people: A review of events and resources</a></td><td><a href="https://arxiv.org/pdf/1701.02664.pdf" target="_blank">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>10</td><td>5</td><td>5</td><td>1</td><td>6</td><td>4</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html" target="_blank">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><a href="http://cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf" target="_blank">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>83%</td><td>6</td><td>5</td><td>1</td><td>1</td><td>5</td><td>1</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html" target="_blank">USED: a large-scale social event detection 
dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Trento</td><td>Italy</td><td>46.06588360</td><td>11.11598940</td><td>71%</td><td>7</td><td>5</td><td>2</td><td>0</td><td>3</td><td>4</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html" target="_blank">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="https://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>6</td><td>3</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>czech_news_agency</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html" target="_blank">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="https://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>10</td><td>4</td><td>6</td><td>0</td><td>4</td><td>6</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html" target="_blank">A Multi-modal Graphical Model for Scene Analysis</a></td><td><a href="http://www.nicta.com.au/wp-content/uploads/2015/02/TaghaviNaminetalWACV15.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>0</td><td>5</td><td>3</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html" target="_blank">Is the eye region more reliable than the face? 
A preliminary study of face-based recognition on a transgender dataset</a></td><td><a href="http://www.csis.pace.edu/~ctappert/dps/2013BTAS/Papers/Paper%20139/PID2859389.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>1</td><td>3</td><td>5</td></tr><tr><td>a5a3bc3e5e9753769163cb30b16dbd12e266b93e</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/a5a3bc3e5e9753769163cb30b16dbd12e266b93e.html" target="_blank">Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>1</td><td>5</td><td>3</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html" target="_blank">Component-Based Face Recognition with 3D Morphable Models</a></td><td><a href="http://cbcl.mit.edu/cbcl/publications/theses/thesis-huang.pdf" target="_blank">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>12</td><td>4</td><td>8</td><td>0</td><td>8</td><td>4</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html" target="_blank">ReSEED: social event dEtection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>1</td><td>5</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html" target="_blank">Large scale unconstrained open set face database</a></td><td><a href="http://vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf" target="_blank">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of Colorado at Colorado Springs</td><td>United States</td><td>38.89646790</td><td>-104.80505940</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>3</td><td>2</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html" target="_blank">Faces in Places: compound query retrieval</a></td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>1</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html" target="_blank">Visual Kinship Recognition of Families in the Wild</a></td><td><a href="https://web.northeastern.edu/smilelab/fiw/papers/Supplemental_PP.pdf" target="_blank">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>University of Massachusetts Dartmouth</td><td>United 
States</td><td>41.62772475</td><td>-71.00724501</td><td>100%</td><td>3</td><td>3</td><td>0</td><td>0</td><td>2</td><td>1</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html" target="_blank">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><a href="http://biometrics.cse.msu.edu/Publications/Face/Mazeetal_IARPAJanusBenchmarkCFaceDatasetAndProtocol_ICB2018.pdf" target="_blank">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>9</td><td>3</td><td>6</td><td>2</td><td>9</td><td>0</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html" target="_blank">Large age-gap face verification by feature injection in deep networks</a></td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>4</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market1203</td><td>Market 1203</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html" target="_blank">Spoofing faces using makeup: An investigative study</a></td><td><a href="http://www.cse.msu.edu/~rossarun/pubs/ChenFaceMakeupSpoof_ISBA2017.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>France</td><td>43.61581310</td><td>7.06838000</td><td>60%</td><td>5</td><td>3</td><td>2</td><td>0</td><td>1</td><td>4</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html" target="_blank">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html" target="_blank">Investigating Open-World Person Re-identification Using a Drone</a></td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>3</td><td>2</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku</td><td>PKU</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html" target="_blank">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>3</td><td>2</td><td>1</td><td>0</td><td>1</td><td>2</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html" target="_blank">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><a href="https://arxiv.org/pdf/1703.06283.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>17%</td><td>12</td><td>2</td><td>10</td><td>1</td><td>11</td><td>1</td></tr><tr><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td><td>uccs</td><td>UCCS</td><td><a href="papers/d4f1eb008eb80595bcfdac368e23ae9754e1e745.html" target="_blank">Unconstrained Face Detection and Open-Set Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1708.02337.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>4</td><td>1</td></tr><tr><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461.html" target="_blank">A 3 D Dynamic Database for Unconstrained Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html" target="_blank">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><a href="https://scalable.mpi-inf.mpg.de/files/2016/01/main_wacv.pdf" target="_blank">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html" target="_blank">MAXIMUM LIKELIHOOD TRAINING OF THE EMBEDDED HMM FOR FACE DETECTION AND RECOGNITION Ara V. Ne an and Monson H. 
Hayes III Center for Signal and Image Processing School of Electrical and Computer Engineering</a></td><td><a href="https://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>2</td><td>1</td></tr><tr><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td><td>gfw</td><td>Grouping Face in the Wild</td><td><a href="papers/e58dd160a76349d46f881bd6ddbc2921f08d1050.html" target="_blank">Merge or Not? Learning to Group Faces via Imitation Learning</a></td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html" target="_blank">Indian Face Age Database : A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td><td>imdb_face</td><td>IMDb Face</td><td><a href="papers/9e31e77f9543ab42474ba4e9330676e18c242e72.html" target="_blank">The Devil of Face Recognition is in the Noise</a></td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Nanyang Technological University</td><td>Singapore</td><td>1.34841040</td><td>103.68297965</td><td>20%</td><td>5</td><td>1</td><td>4</td><td>0</td><td>3</td><td>1</td></tr><tr><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td><td>megaage</td><td>MegaAge</td><td><a href="papers/d80a3d1f3a438e02a6685e66ee908446766fefa9.html" target="_blank">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td><td>mr2</td><td>MR2</td><td><a href="papers/578d4ad74818086bb64f182f72e2c8bd31e3d426.html" target="_blank">The MR2: A multi-racial, mega-resolution database of facial stimuli.</a></td><td><a href="https://pdfs.semanticscholar.org/be5b/455abd379240460d022a0e246615b0b86c14.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>14%</td><td>7</td><td>1</td><td>6</td><td>0</td><td>7</td><td>0</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html" target="_blank">Crowdsourcing facial expressions for affective-interaction</a></td><td><span class="gray">[pdf]</span></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html" target="_blank">Re-identification of pedestrians with variable occlusion and scale</a></td><td><span 
class="gray">[pdf]</span></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>Kingston University</td><td>United Kingdom</td><td>51.42930860</td><td>-0.26840440</td><td>10%</td><td>10</td><td>1</td><td>9</td><td>2</td><td>6</td><td>4</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td>WLFDB</td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html" target="_blank">WLFDB: Weakly Labeled Face Databases</a></td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/9696ad8b164f5e10fcfe23aacf74bd6168aebb15.html" target="_blank">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>2</td></tr><tr><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4.html" target="_blank">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html" target="_blank">Face swapping: automatically replacing faces in photographs</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html" target="_blank">FDDB: A benchmark for face detection in unconstrained settings</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>1</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html" target="_blank">HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</a></td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>2b89de1d81cee50552f10e26e865df3365e9bc88</td><td>ibm_dif</td><td>IBM Diversity in Faces</td><td><a href="papers/2b89de1d81cee50552f10e26e865df3365e9bc88.html" target="_blank">Diversity in Faces</a></td><td><a href="https://arxiv.org/pdf/1901.10436.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html" target="_blank">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html" target="_blank">PETS 2017: Dataset and Challenge</a></td><td><a href="http://tahirnawaz.com/papers/2017_CVPRW_PETS2017Dataset_Luis_Nawaz_Cane_Ferryman.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>8</td><td>0</td><td>2</td><td>6</td></tr><tr><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/c866a2afc871910e3282fd9498dce4ab20f6a332.html" target="_blank">Surveillance Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td><td>scut_head</td><td>SCUT HEAD</td><td><a href="papers/d3200d49a19a4a4e4e9745ee39649b65d80c834b.html" target="_blank">Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</a></td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td>2018 24th International Conference on Pattern Recognition (ICPR)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html" target="_blank">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>8990cdce3f917dad622e43e033db686b354d057c</td><td>tiny_faces</td><td>TinyFace</td><td><a href="papers/8990cdce3f917dad622e43e033db686b354d057c.html" target="_blank">Low-Resolution Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>377f2b65e6a9300448bdccf678cde59449ecd337</td><td>ufdd</td><td>UFDD</td><td><a href="papers/377f2b65e6a9300448bdccf678cde59449ecd337.html" target="_blank">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html" target="_blank">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><span class="gray">[pdf]</span></td><td>Unknown</td><td>edu</td><td>University of Kentucky</td><td>United States</td><td>38.03337420</td><td>-84.50177580</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>77c81c13a110a341c140995bedb98101b9e84f7f</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/77c81c13a110a341c140995bedb98101b9e84f7f.html" target="_blank">WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr></table></body></html>
\ No newline at end of file
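Note on the Coverage column in the report above: the values are consistent with the share of each paper's citations that could be geocoded, rounded to a whole percent (PKU: 2 of 3 → 67%, V47: 1 of 10 → 10%, Precarious: 2 of 12 → 17%), while rows with no citations at all (FaceTracer, QMUL-SurvFace, TinyFace, WGT) render as 100%. A minimal sketch of that calculation, assuming Python and a hypothetical coverage_percent helper; this is inferred from the table values, not taken from the scraper's source:

def coverage_percent(geocoded: int, total: int) -> int:
    # Share of citations that were geocoded, as a whole percent.
    # Rows with zero total citations display 100% in this report,
    # so zero-total is treated as fully covered here (assumption).
    if total == 0:
        return 100
    return round(100 * geocoded / total)

# Values taken from rows in the table above.
assert coverage_percent(2, 3) == 67    # PKU
assert coverage_percent(1, 10) == 10   # V47
assert coverage_percent(2, 12) == 17   # Precarious
assert coverage_percent(0, 0) == 100   # FaceTracer / TinyFace rows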
