| field | value | details |
|---|---|---|
| author | Jules Laplace <julescarbon@gmail.com> | 2018-12-16 16:29:04 +0100 |
| committer | Jules Laplace <julescarbon@gmail.com> | 2018-12-16 16:29:04 +0100 |
| commit | 05fc975a313aa38483d904cb9ad07a029641d086 | (patch) |
| tree | 85f3d4fdf3688c2779d3ca3ba9c59910a48d1df9 | /scraper |
| parent | 110f3a34f1f36d0ea999d4aa34bbe66d5f2a01da | (diff) |
rebuild
Diffstat (limited to 'scraper')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | scraper/reports/report_coverage.html | 2 |
| -rw-r--r-- | scraper/reports/report_index.html | 2 |
| -rw-r--r-- | scraper/s2-citation-report.py | 67 |
| -rw-r--r-- | scraper/s2-search.py | 20 |
| -rw-r--r-- | scraper/util.py | 2 |
5 files changed, 76 insertions, 17 deletions
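Two details of the regenerated report_coverage.html are easy to miss in the minified diff below. First, the Coverage column tracks the ratio of geocoded to total citations (e.g. UCF101: 535 of 999, shown as 54%). Second, several PDF Link hrefs contain a stringified Python dict ({'url': ..., 'linkType': ...}) rather than a bare URL, i.e. the link object's repr() was written straight into the template. A minimal sketch of both behaviors, using hypothetical helper names (coverage_percent, normalize_pdf_url) that are not taken from s2-citation-report.py:

```python
# Hypothetical sketch -- not the repository's actual s2-citation-report.py code.
import ast

def normalize_pdf_url(link):
    """Return a plain URL for the report's PDF Link column.

    Semantic Scholar link fields show up either as a bare URL string or as
    a dict like {'url': ..., 'linkType': 'arxiv'}; some rows in
    report_coverage.html have the dict's repr() embedded in the href, so the
    dict case needs unwrapping before templating.
    """
    if isinstance(link, str) and link.startswith("{"):
        link = ast.literal_eval(link)  # stringified dict from an earlier pass
    if isinstance(link, dict):
        return link.get("url", "")
    return link or ""

def coverage_percent(geocoded, total):
    """Coverage as shown in the table: geocoded citations / total citations."""
    return round(100 * geocoded / total) if total else 0

# Example row from the table below: UCF101 has 535 geocoded of 999 total -> 54%.
assert coverage_percent(535, 999) == 54
```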
diff --git a/scraper/reports/report_coverage.html b/scraper/reports/report_coverage.html
index 41716aaa..0f6d7c55 100644
--- a/scraper/reports/report_coverage.html
+++ b/scraper/reports/report_coverage.html
@@ -1 +1 @@
-<!doctype html><html><head><meta charset='utf-8'><title>Coverage</title><link rel='stylesheet' href='reports.css'></head><body><h2>Coverage</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1212.0402.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>University of Central Florida</td><td>28.59899755</td><td>-81.19712501</td><td>54%</td><td>999</td><td>535</td><td>464</td><td>73</td><td>708</td><td>212</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html">Face detection, pose estimation, and landmark localization in the wild</a></td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of California, Irvine</td><td>33.64319010</td><td>-117.84016494</td><td>52%</td><td>999</td><td>521</td><td>478</td><td>59</td><td>607</td><td>273</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face
Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>put_face</td><td>Put Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html">Attribute and simile classifiers for face verification</a></td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>51%</td><td>894</td><td>454</td><td>440</td><td>55</td><td>587</td><td>222</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>42%</td><td>999</td><td>422</td><td>577</td><td>80</td><td>538</td><td>295</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td></td><td></td><td></td><td></td><td>41%</td><td>975</td><td>403</td><td>572</td><td>65</td><td>460</td><td>345</td></tr><tr><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td><td>a_pascal_yahoo</td><td>aPascal</td><td><a href="papers/2e384f057211426ac5922f1b33d2aa8df5d51f57.html">Describing objects by their attributes</a></td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>727</td><td>73</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html">Deep Face Recognition</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>621</td><td>156</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a 
href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf">[pdf]</a></td><td></td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>38%</td><td>999</td><td>380</td><td>619</td><td>75</td><td>555</td><td>252</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html">VQA: Visual Question Answering</a></td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>47%</td><td>731</td><td>344</td><td>387</td><td>47</td><td>628</td><td>4</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html">Deep Learning Face Attributes in the Wild</a></td><td><a href="http://arxiv.org/pdf/1411.7766v2.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>42%</td><td>808</td><td>340</td><td>468</td><td>69</td><td>666</td><td>50</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html">International Conference on Automatic Face and Gesture Recognition The CMU Pose , Illumination , and Expression ( PIE ) Database</a></td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>742</td><td>330</td><td>412</td><td>61</td><td>410</td><td>232</td></tr><tr><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td><td>voc</td><td>VOC</td><td><a href="papers/abe9f3b91fd26fa1b50cd685c0d20debfb372f73.html">The Pascal Visual Object Classes Challenge: A Retrospective</a></td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>999</td><td>315</td><td>684</td><td>75</td><td>698</td><td>6</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf">[pdf]</a></td><td></td><td>edu</td><td>Kyushu University</td><td>33.59914655</td><td>130.22359848</td><td>36%</td><td>848</td><td>308</td><td>540</td><td>56</td><td>413</td><td>255</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html">Nonparametric Object and Scene Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>999</td><td>305</td><td>694</td><td>93</td><td>670</td><td>9</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfw_p</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><a 
href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>53%</td><td>521</td><td>274</td><td>247</td><td>40</td><td>321</td><td>144</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html">Overview of the face recognition grand challenge</a></td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>253</td><td>746</td><td>110</td><td>572</td><td>64</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>251</td><td>748</td><td>110</td><td>509</td><td>113</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html">Face recognition in unconstrained videos with matched background similarity</a></td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Open University of Israel</td><td>32.77824165</td><td>34.99565673</td><td>50%</td><td>485</td><td>244</td><td>240</td><td>32</td><td>290</td><td>140</td></tr><tr><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td><td>sheffield</td><td>Sheffield Face</td><td><a href="papers/3607afdb204de9a5a9300ae98aa4635d9effcda2.html">Face Description with Local Binary Patterns: Application to Face Recognition</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>999</td><td>238</td><td>761</td><td>65</td><td>483</td><td>87</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a 
href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html">Learning Face Representation from Scratch</a></td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>53%</td><td>436</td><td>233</td><td>203</td><td>32</td><td>284</td><td>115</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>707</td><td>223</td><td>484</td><td>62</td><td>482</td><td>18</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html">Histograms of oriented gradients for human detection</a></td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>22%</td><td>999</td><td>217</td><td>782</td><td>67</td><td>520</td><td>22</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html">O M 4 . 1 The Subject Database 4 . 2 Experiment Plan 5 . 1 Varying the Overlap 4 Experimental Setup 5 Parameterisation Results</a></td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>999</td><td>214</td><td>785</td><td>96</td><td>550</td><td>57</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td><td>kdef</td><td>KDEF</td><td><a href="papers/93884e46c49f7ae1c7c34046fbc28882f2bd6341.html">Gaze fixation and the neural circuitry of face processing in autism</a></td><td><a href="{'url': 'http://doi.org/10.1038/nn1421', 'linkType': 'nature'}">[pdf]</a></td><td>Nature 
Neuroscience</td><td></td><td></td><td></td><td></td><td>31%</td><td>608</td><td>190</td><td>418</td><td>92</td><td>463</td><td>0</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk03</td><td>CUHK03</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>35%</td><td>512</td><td>180</td><td>332</td><td>29</td><td>323</td><td>4</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866', 'linkType': 'ieee'}">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>50%</td><td>356</td><td>179</td><td>177</td><td>21</td><td>299</td><td>3</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html">Interactive Facial Feature Localization</a></td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>52%</td><td>339</td><td>177</td><td>162</td><td>27</td><td>208</td><td>100</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html">Robust Face Landmark Estimation under Occlusion</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>55%</td><td>305</td><td>167</td><td>138</td><td>16</td><td>186</td><td>95</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><a 
href="https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>56%</td><td>285</td><td>159</td><td>125</td><td>28</td><td>188</td><td>82</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>584</td><td>159</td><td>425</td><td>38</td><td>336</td><td>9</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>imm_face</td><td>IMM Face Dataset</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>muct</td><td>MUCT</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html">Scalable Person Re-identification: A Benchmark</a></td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>38%</td><td>394</td><td>149</td><td>245</td><td>18</td><td>271</td><td>3</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html">Web-based database for facial expression analysis</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and 
Expo</td><td></td><td></td><td></td><td></td><td>32%</td><td>440</td><td>142</td><td>298</td><td>44</td><td>258</td><td>82</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td>41%</td><td>323</td><td>131</td><td>192</td><td>29</td><td>221</td><td>25</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html">A 3D facial expression database for facial behavior research</a></td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>24%</td><td>555</td><td>131</td><td>424</td><td>47</td><td>283</td><td>48</td></tr><tr><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td><td>coco</td><td>COCO</td><td><a href="papers/696ca58d93f6404fea0fc75c62d1d7b378f47628.html">Microsoft COCO Captions: Data Collection and Evaluation Server</a></td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>46%</td><td>283</td><td>129</td><td>154</td><td>16</td><td>231</td><td>4</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>498</td><td>127</td><td>371</td><td>55</td><td>319</td><td>32</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html">Presentation and validation of the Radboud Faces Database</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>28%</td><td>446</td><td>127</td><td>319</td><td>43</td><td>307</td><td>19</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces_news</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html">Names and faces in the news</a></td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.</td><td></td><td></td><td></td><td></td><td>41%</td><td>294</td><td>120</td><td>174</td><td>24</td><td>207</td><td>45</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="http://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td>20%</td><td>586</td><td>119</td><td>467</td><td>48</td><td>336</td><td>3</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>48%</td><td>222</td><td>107</td><td>115</td><td>21</td><td>158</td><td>48</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>48%</td><td>218</td><td>105</td><td>113</td><td>17</td><td>146</td><td>52</td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/28312c3a47c1be3a67365700744d3d6665b86f22.html">Face Recognition: A Literature Survey1</a></td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>105</td><td>265</td><td>32</td><td>217</td><td>39</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a 
href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>berkeley_pose</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html">Describing people: A poselet-based approach to attribute classification</a></td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>43%</td><td>221</td><td>96</td><td>125</td><td>14</td><td>160</td><td>23</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>10%</td><td>999</td><td>96</td><td>355</td><td>26</td><td>252</td><td>5</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html">Recognition using visual phrases</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>41%</td><td>233</td><td>95</td><td>138</td><td>18</td><td>174</td><td>5</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>352</td><td>94</td><td>258</td><td>26</td><td>195</td><td>3</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html">Pedestrian detection: A benchmark</a></td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>17%</td><td>519</td><td>89</td><td>430</td><td>27</td><td>286</td><td>2</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>32%</td><td>260</td><td>84</td><td>85</td><td>18</td><td>143</td><td>10</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html">Exploring Models and 
Data for Image Question Answering</a></td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>191</td><td>83</td><td>108</td><td>12</td><td>163</td><td>1</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>167</td><td>83</td><td>84</td><td>15</td><td>131</td><td>27</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html">Depth and Appearance for Mobile Scene Analysis</a></td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>25%</td><td>319</td><td>79</td><td>240</td><td>27</td><td>192</td><td>0</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html">WIDER FACE: A Face Detection Benchmark</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.06523.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>53%</td><td>148</td><td>78</td><td>70</td><td>16</td><td>107</td><td>34</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>51%</td><td>148</td><td>76</td><td>72</td><td>8</td><td>109</td><td>3</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>m2vts</td><td>m2vts</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a 
href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw_a</td><td>LFW-a</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>42%</td><td>177</td><td>75</td><td>102</td><td>15</td><td>102</td><td>54</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html">Age and Gender Estimation of Unfiltered Faces</a></td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td></td><td></td><td></td><td></td><td>43%</td><td>168</td><td>72</td><td>96</td><td>7</td><td>89</td><td>53</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html">Understanding images of groups of people</a></td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>36%</td><td>202</td><td>72</td><td>130</td><td>12</td><td>126</td><td>24</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>47%</td><td>150</td><td>71</td><td>79</td><td>4</td><td>111</td><td>8</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html">Learning effective human pose estimation from inaccurate annotation</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 
2011</td><td></td><td></td><td></td><td></td><td>40%</td><td>173</td><td>70</td><td>103</td><td>9</td><td>116</td><td>2</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html">Generic object recognition with boosting</a></td><td><a href="http://www.emt.tu-graz.ac.at/~pinz/onlinepapers/Reprint_Vol_28_No_3_2006.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>286</td><td>69</td><td>217</td><td>16</td><td>189</td><td>0</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk01</td><td>CUHK01</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>258</td><td>67</td><td>191</td><td>12</td><td>141</td><td>1</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>55%</td><td>121</td><td>66</td><td>55</td><td>11</td><td>98</td><td>20</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>towncenter</td><td>TownCenter</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html">Stable Multi-Target Tracking in Real-Time Surveillance Video (Preprint)</a></td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>310</td><td>64</td><td>246</td><td>24</td><td>177</td><td>4</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="https://doi.org/10.1007/s11263-016-0940-3">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>133</td><td>59</td><td>74</td><td>14</td><td>90</td><td>28</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>-35.27769990</td><td>149.11852700</td><td>33%</td><td>175</td><td>58</td><td>66</td><td>7</td><td>54</td><td>49</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html">Bosphorus Database for 3D Face Analysis</a></td><td><a 
href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>328</td><td>58</td><td>270</td><td>18</td><td>143</td><td>37</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html">Performance Measures and a Data Set for Multi-target, Multi-camera Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>136</td><td>58</td><td>78</td><td>6</td><td>107</td><td>0</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html">Performance Measures and a Data Set for Multi-target, Multi-camera Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>136</td><td>58</td><td>78</td><td>6</td><td>107</td><td>0</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462', 'linkType': 'ieee'}">[pdf]</a></td><td>Face and Gesture 2011</td><td></td><td></td><td></td><td></td><td>32%</td><td>184</td><td>58</td><td>126</td><td>23</td><td>112</td><td>23</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk02</td><td>CUHK02</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html">Locally Aligned Feature Transforms across Views</a></td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>24%</td><td>242</td><td>57</td><td>185</td><td>17</td><td>139</td><td>1</td></tr><tr><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2f5d44dc3e1b5955942133ff872ebd31716ec604.html">2D and 3D face recognition: A survey</a></td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>15%</td><td>389</td><td>57</td><td>332</td><td>28</td><td>198</td><td>17</td></tr><tr><td>a6e695ddd07aad719001c0fc1129328452385949</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/a6e695ddd07aad719001c0fc1129328452385949.html">The New Data and New Challenges in Multimedia Research</a></td><td><span class="gray">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>36%</td><td>160</td><td>57</td><td>103</td><td>11</td><td>105</td><td>4</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html">A data-driven approach to cleaning large face datasets</a></td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td>edu</td><td>University of Illinois, 
Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>46%</td><td>123</td><td>56</td><td>67</td><td>6</td><td>95</td><td>21</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>36%</td><td>151</td><td>54</td><td>97</td><td>7</td><td>85</td><td>26</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html">On a large sequence-based human gait database</a></td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>148</td><td>54</td><td>94</td><td>16</td><td>98</td><td>0</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html">Appearance-based gaze estimation in the wild</a></td><td><a href="https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Max Planck Institute for Informatics</td><td>49.25795660</td><td>7.04577417</td><td>38%</td><td>138</td><td>52</td><td>86</td><td>3</td><td>94</td><td>7</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="{'url': 'http://doi.org/10.1016/j.imavis.2016.01.002', 'linkType': 'doi'}">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>45%</td><td>114</td><td>51</td><td>63</td><td>10</td><td>70</td><td>31</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>146</td><td>49</td><td>97</td><td>6</td><td>96</td><td>0</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html">Person Re-identification by Video Ranking</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>25%</td><td>196</td><td>49</td><td>124</td><td>11</td><td>98</td><td>1</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf">[pdf]</a></td><td></td><td>edu</td><td>Tohoku 
University</td><td>38.25309450</td><td>140.87365930</td><td>53%</td><td>91</td><td>48</td><td>43</td><td>5</td><td>60</td><td>15</td></tr><tr><td>109df0e8e5969ddf01e073143e83599228a1163f</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/109df0e8e5969ddf01e073143e83599228a1163f.html">Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</a></td><td><a href="http://dl.acm.org/citation.cfm?id=2337184">[pdf]</a></td><td>2012 39th Annual International Symposium on Computer Architecture (ISCA)</td><td></td><td></td><td></td><td></td><td>25%</td><td>192</td><td>48</td><td>144</td><td>8</td><td>99</td><td>0</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>38%</td><td>125</td><td>48</td><td>77</td><td>6</td><td>72</td><td>1</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>41%</td><td>116</td><td>47</td><td>46</td><td>4</td><td>62</td><td>19</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390', 'linkType': 'ieee'}">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td>44%</td><td>102</td><td>45</td><td>54</td><td>6</td><td>62</td><td>28</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>38%</td><td>112</td><td>43</td><td>69</td><td>14</td><td>83</td><td>2</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/dd1d/51c3a59cb71cbfe1433ebeb4d973f7f9ddc1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>22%</td><td>193</td><td>42</td><td>133</td><td>15</td><td>101</td><td>2</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-MotionPairs</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><a href="http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>University of Queensland</td><td>-27.49741805</td><td>153.01316956</td><td>30%</td><td>128</td><td>39</td><td>89</td><td>6</td><td>68</td><td>14</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html">Pedestrian Attribute Recognition At Far Distance</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>80</td><td>37</td><td>43</td><td>2</td><td>51</td><td>3</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html">Pruning training sets for learning of object categories</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308', 'linkType': 'ieee'}">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>58%</td><td>60</td><td>35</td><td>25</td><td>5</td><td>42</td><td>12</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html">Multi-camera activity correlation analysis</a></td><td><a 
href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0163.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>138</td><td>35</td><td>103</td><td>8</td><td>76</td><td>1</td></tr><tr><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/214c966d1f9c2a4b66f4535d9a0d4078e63a5867.html">Brainwash: A Data System for Feature Engineering</a></td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>57</td><td>34</td><td>23</td><td>2</td><td>50</td><td>0</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>77</td><td>34</td><td>43</td><td>4</td><td>57</td><td>1</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td>39%</td><td>83</td><td>32</td><td>51</td><td>2</td><td>62</td><td>3</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>72</td><td>32</td><td>40</td><td>7</td><td>54</td><td>8</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>112</td><td>31</td><td>81</td><td>12</td><td>76</td><td>4</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html">High Five: Recognising human interactions in TV shows</a></td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>91</td><td>31</td><td>60</td><td>11</td><td>64</td><td>1</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern 
Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>68</td><td>30</td><td>38</td><td>8</td><td>49</td><td>7</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>60%</td><td>50</td><td>30</td><td>19</td><td>2</td><td>40</td><td>4</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>60%</td><td>48</td><td>29</td><td>19</td><td>2</td><td>45</td><td>2</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html">Labeled Faces in the Wild: A Survey</a></td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>99</td><td>29</td><td>70</td><td>9</td><td>63</td><td>12</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>47%</td><td>60</td><td>28</td><td>32</td><td>0</td><td>36</td><td>6</td></tr><tr><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td><td>gavab_db</td><td>Gavab</td><td><a href="papers/42505464808dfb446f521fc6ff2cfeffd4d68ff1.html">Expression invariant 3D face recognition with a Morphable Model</a></td><td><a href="{'url': 
'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376', 'linkType': 'ieee'}">[pdf]</a></td><td>2008 8th IEEE International Conference on Automatic Face & Gesture Recognition</td><td></td><td></td><td></td><td></td><td>29%</td><td>94</td><td>27</td><td>67</td><td>10</td><td>57</td><td>5</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>18%</td><td>151</td><td>27</td><td>124</td><td>9</td><td>78</td><td>2</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.434">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>32%</td><td>85</td><td>27</td><td>58</td><td>9</td><td>51</td><td>0</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>73</td><td>26</td><td>47</td><td>6</td><td>39</td><td>16</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html">Recognize complex events from static images by fusing deep channels</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>58%</td><td>45</td><td>26</td><td>19</td><td>1</td><td>30</td><td>12</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html">Person Re-identification in the Wild</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Technology 
Sydney</td><td>-33.88096510</td><td>151.20107299</td><td>38%</td><td>65</td><td>25</td><td>40</td><td>1</td><td>46</td><td>0</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html">Automatic 3D face authentication</a></td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>25%</td><td>95</td><td>24</td><td>71</td><td>8</td><td>60</td><td>2</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td>edu</td><td>Jacobs University</td><td>53.41291480</td><td>-2.96897915</td><td>44%</td><td>52</td><td>23</td><td>29</td><td>5</td><td>37</td><td>4</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html">Object Detection Combining Recognition and Segmentation</a></td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>23%</td><td>101</td><td>23</td><td>78</td><td>11</td><td>58</td><td>0</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><a 
href="http://doi.acm.org/10.1145/2072572.2072590">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>122</td><td>22</td><td>100</td><td>11</td><td>71</td><td>1</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>24%</td><td>89</td><td>21</td><td>46</td><td>2</td><td>20</td><td>14</td></tr><tr><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/eb027969f9310e0ae941e2adee2d42cdf07d938c.html">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1710.08092.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>38%</td><td>56</td><td>21</td><td>35</td><td>6</td><td>50</td><td>3</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html">The MUG facial expression database</a></td><td><a href="http://ieeexplore.ieee.org/document/5617662/">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>40.62984145</td><td>22.95889350</td><td>28%</td><td>68</td><td>19</td><td>49</td><td>5</td><td>28</td><td>19</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>22%</td><td>88</td><td>19</td><td>43</td><td>4</td><td>42</td><td>2</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html">UMDFaces: An annotated face dataset for training deep networks</a></td><td><a href="http://arxiv.org/abs/1611.01484">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td>54%</td><td>35</td><td>19</td><td>16</td><td>5</td><td>28</td><td>6</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html">The intrinsic memorability of face photographs.</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Journal of experimental psychology. 
General</td><td></td><td></td><td></td><td></td><td>36%</td><td>47</td><td>17</td><td>30</td><td>3</td><td>33</td><td>1</td></tr><tr><td>69a68f9cf874c69e2232f47808016c2736b90c35</td><td>celeba_plus</td><td>CelebFaces+</td><td><a href="papers/69a68f9cf874c69e2232f47808016c2736b90c35.html">Learning Deep Representation for Imbalanced Classification</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>33%</td><td>51</td><td>17</td><td>34</td><td>1</td><td>39</td><td>2</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>39%</td><td>44</td><td>17</td><td>27</td><td>0</td><td>36</td><td>0</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html">Violent flows: Real-time detection of violent crowd behavior</a></td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>20%</td><td>83</td><td>17</td><td>66</td><td>6</td><td>42</td><td>2</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>CAFE</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>33</td><td>16</td><td>17</td><td>3</td><td>28</td><td>1</td></tr><tr><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/c34532fe6bfbd1e6df477c9ffdbb043b77e7804d.html">A 3D Morphable Eye Region Model for Gaze Estimation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>67%</td><td>24</td><td>16</td><td>8</td><td>0</td><td>18</td><td>4</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html">The CMU Face In Action (FIA) Database</a></td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>55</td><td>16</td><td>39</td><td>5</td><td>38</td><td>7</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a 
href="https://doi.org/10.1007/s11263-013-0672-6">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>1</td><td>18</td><td>1</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>20</td><td>0</td></tr><tr><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/a5a44a32a91474f00a3cda671a802e87c899fbb4.html">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1801.03150.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>25</td><td>0</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html">Describing Common Human Visual Actions in Images</a></td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>26</td><td>14</td><td>12</td><td>0</td><td>25</td><td>0</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html">End-to-End Deep Learning for Person Search</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf', 'linkType': 's2'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>34%</td><td>41</td><td>14</td><td>27</td><td>2</td><td>27</td><td>0</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html">Personalizing Human Video Pose Estimation</a></td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>32</td><td>14</td><td>18</td><td>2</td><td>27</td><td>0</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a 
href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html">UMB-DB: A database of partially occluded 3D faces</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>29%</td><td>45</td><td>13</td><td>32</td><td>2</td><td>20</td><td>3</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>31%</td><td>39</td><td>12</td><td>27</td><td>2</td><td>27</td><td>7</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td></td><td></td><td></td><td></td><td>16%</td><td>75</td><td>12</td><td>63</td><td>6</td><td>25</td><td>8</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html">Fine-grained evaluation on face detection in the wild</a></td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Chinese Academy of Sciences</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>13</td><td>4</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>16%</td><td>77</td><td>12</td><td>39</td><td>3</td><td>32</td><td>0</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html">Consistent Re-identification in a Camera Network</a></td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>45</td><td>12</td><td>33</td><td>7</td><td>34</td><td>1</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf">[pdf]</a></td><td>2012 International Conference on 
Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td>21%</td><td>58</td><td>12</td><td>46</td><td>7</td><td>40</td><td>1</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>67%</td><td>18</td><td>12</td><td>6</td><td>0</td><td>16</td><td>0</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1705.00393.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>41%</td><td>27</td><td>11</td><td>16</td><td>2</td><td>22</td><td>4</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>0</td><td>18</td><td>0</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html">Texas 3D Face Recognition Database</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ssiai_may10.pdf">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td>18%</td><td>61</td><td>11</td><td>50</td><td>3</td><td>36</td><td>2</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>25%</td><td>40</td><td>10</td><td>30</td><td>0</td><td>20</td><td>6</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html">Training deep networks for facial 
expression recognition with crowd-sourced label distribution</a></td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>15</td><td>3</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>11</td><td>2</td></tr><tr><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td><td>agedb</td><td>AgeDB</td><td><a href="papers/6dcf418c778f528b5792104760f1fbfe90c6dd6a.html">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>82%</td><td>11</td><td>9</td><td>2</td><td>0</td><td>10</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html">Fashion Landmark Detection in the Wild</a></td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>39%</td><td>23</td><td>9</td><td>8</td><td>0</td><td>15</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Delhi Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html">Automated Human Identification Using Ear Imaging</a></td><td><a href="{'url': 
'https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>13%</td><td>70</td><td>9</td><td>61</td><td>6</td><td>28</td><td>1</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</span></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>10</td><td>4</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html">Kinship Verification through Transfer Learning</a></td><td><a href="http://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>14%</td><td>66</td><td>9</td><td>39</td><td>2</td><td>18</td><td>5</td></tr><tr><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td><td>nudedetection</td><td>Nude Detection</td><td><a href="papers/7ace44190729927e5cb0dd5d363fcae966fe13f7.html">A bag-of-features approach based on Hue-SIFT descriptor for nude detection</a></td><td><a href="http://ieeexplore.ieee.org/document/7077625/">[pdf]</a></td><td>2009 17th European Signal Processing Conference</td><td></td><td></td><td></td><td></td><td>18%</td><td>51</td><td>9</td><td>42</td><td>1</td><td>18</td><td>0</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>20</td><td>8</td><td>12</td><td>2</td><td>16</td><td>0</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928', 'linkType': 'ieee'}">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td></td><td></td><td></td><td></td><td>25%</td><td>32</td><td>8</td><td>24</td><td>3</td><td>16</td><td>1</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>47%</td><td>17</td><td>8</td><td>9</td><td>1</td><td>12</td><td>1</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a 
href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>47%</td><td>15</td><td>7</td><td>8</td><td>1</td><td>10</td><td>3</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html">Ear recognition: More than a survey</a></td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td>29%</td><td>24</td><td>7</td><td>17</td><td>0</td><td>11</td><td>0</td></tr><tr><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td><td>camel</td><td>CAMEL</td><td><a href="papers/5801690199c1917fa58c35c3dead177c0b8f9f2d.html">Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf">[pdf]</a></td><td>Remote Sensing</td><td></td><td></td><td></td><td></td><td>37%</td><td>19</td><td>7</td><td>12</td><td>1</td><td>16</td><td>0</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td></td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</span></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td>22%</td><td>32</td><td>7</td><td>25</td><td>2</td><td>17</td><td>0</td></tr><tr><td>ec792ad2433b6579f2566c932ee414111e194537</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/ec792ad2433b6579f2566c932ee414111e194537.html">Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1711.08565.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>50%</td><td>14</td><td>7</td><td>7</td><td>1</td><td>11</td><td>0</td></tr><tr><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td><td>pornodb</td><td>Pornography DB</td><td><a href="papers/b92a1ed9622b8268ae3ac9090e25789fc41cc9b8.html">Pooling in image representation: The visual codeword point of view</a></td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>9%</td><td>77</td><td>7</td><td>70</td><td>7</td><td>43</td><td>2</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html">Three-dimensional face recognition: an eigensurface approach</a></td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. 
ICIP '04.</td><td></td><td></td><td></td><td></td><td>19%</td><td>36</td><td>7</td><td>29</td><td>4</td><td>25</td><td>1</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>expw</td><td>ExpW</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html">Recognizing disguised faces</a></td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>24</td><td>6</td><td>18</td><td>0</td><td>16</td><td>1</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html">IARPA Janus Benchmark-B Face Dataset</a></td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>24%</td><td>25</td><td>6</td><td>19</td><td>6</td><td>21</td><td>3</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.02459.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td>edu</td><td>South China University of Technology</td><td>23.05020420</td><td>113.39880323</td><td>43%</td><td>14</td><td>6</td><td>8</td><td>3</td><td>5</td><td>7</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>24%</td><td>21</td><td>5</td><td>16</td><td>3</td><td>11</td><td>1</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><a 
href="https://doi.org/10.1109/TIFS.2014.2361479">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>34.22498270</td><td>-77.86907744</td><td>38%</td><td>13</td><td>5</td><td>3</td><td>0</td><td>3</td><td>3</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><a href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>45.50397610</td><td>-73.57496870</td><td>28%</td><td>18</td><td>5</td><td>6</td><td>0</td><td>7</td><td>4</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html">Re-identify people in wide area camera network</a></td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>9%</td><td>55</td><td>5</td><td>50</td><td>2</td><td>35</td><td>0</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1605.09653.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE transactions on pattern analysis and machine intelligence</td><td></td><td></td><td></td><td></td><td>27%</td><td>15</td><td>4</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td><td>buhmap_db</td><td>BUHMAP-DB </td><td><a href="papers/014b8df0180f33b9fea98f34ae611c6447d761d2.html">Facial feature tracking and expression recognition for sign language</a></td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf">[pdf]</a></td><td>2008 23rd International Symposium on Computer and Information Sciences</td><td></td><td></td><td></td><td></td><td>16%</td><td>25</td><td>4</td><td>21</td><td>1</td><td>10</td><td>2</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html">Adaptive Image Sampling and Windows Classification for On–board Pedestrian Detection</a></td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>44</td><td>4</td><td>40</td><td>1</td><td>21</td><td>0</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html">Iranian Face Database with age, pose and 
expression</a></td><td><span class="gray">[pdf]</a></td><td>2007 International Conference on Machine Vision</td><td></td><td></td><td></td><td></td><td>20%</td><td>20</td><td>4</td><td>16</td><td>2</td><td>11</td><td>3</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html">Genealogical face recognition based on UB KinFace database</a></td><td><a href="https://doi.org/10.1109/CVPRW.2011.5981801">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>42.93362780</td><td>-78.88394479</td><td>13%</td><td>30</td><td>4</td><td>26</td><td>1</td><td>9</td><td>5</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>14%</td><td>29</td><td>4</td><td>25</td><td>3</td><td>17</td><td>0</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><a href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Michigan State University</td><td>42.71856800</td><td>-84.47791571</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>5</td><td>0</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html">Faces in Places: compound query retrieval</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>0</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html">ChaLearn looking at people: A review of events and resources</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1701.02664.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td>30%</td><td>10</td><td>3</td><td>7</td><td>1</td><td>6</td><td>0</td></tr><tr><td>a5acda0e8c0937bfed013e6382da127103e41395</td><td>disfa</td><td>DISFA</td><td><a href="papers/a5acda0e8c0937bfed013e6382da127103e41395.html">Extended DISFA Dataset: Investigating Posed and Spontaneous Facial 
Expressions</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>38%</td><td>8</td><td>3</td><td>5</td><td>1</td><td>5</td><td>0</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html">Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</a></td><td><a href="https://doi.org/10.1109/BTAS.2013.6712710">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of North Carolina Wilmington</td><td>34.23755810</td><td>-77.92701290</td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>2</td><td>3</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217', 'linkType': 'ieee'}">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td>33%</td><td>9</td><td>3</td><td>6</td><td>2</td><td>9</td><td>0</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html">Large scale unconstrained open set face database</a></td><td><a href="http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of Colorado at Colorado Springs</td><td>38.89646790</td><td>-104.80505940</td><td>60%</td><td>5</td><td>3</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html">USED: a large-scale social event detection dataset</a></td><td><a href="http://doi.acm.org/10.1145/2910017.2910624">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html">VADANA: A dense dataset for facial image analysis</a></td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>19%</td><td>16</td><td>3</td><td>13</td><td>0</td><td>6</td><td>6</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf">[pdf]</a></td><td></td><td>edu</td><td>California Institute of 
Technology</td><td>34.13710185</td><td>-118.12527487</td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>6</td><td>1</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html">Face swapping: automatically replacing faces in photographs</a></td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td>ACM Trans. Graph.</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html">Visual Kinship Recognition of Families in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>67%</td><td>3</td><td>2</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/6dbe8e5121c534339d6e41f8683e85f87e6abf81.html">Clothing Cosegmentation for Shopping Images With Cluttered Background</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>33%</td><td>6</td><td>2</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="http://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>12%</td><td>17</td><td>2</td><td>15</td><td>2</td><td>11</td><td>0</td></tr><tr><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td><td>ilids_vid_reid</td><td>iLIDS-VID</td><td><a href="papers/99eb4cea0d9bc9fe777a5c5172f8638a37a7f262.html">Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</a></td><td><a href="https://doi.org/10.1109/LSP.2016.2574323">[pdf]</a></td><td>IEEE Signal Processing Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>4</td><td>0</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html">Large Age-Gap face verification by feature injection in deep networks</a></td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>2</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market1203</td><td>Market 1203</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a 
href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html">Investigating Open-World Person Re-identi cation Using a Drone</a></td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>3</td><td>0</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html">How to Take a Good Selfie?</a></td><td><a href="http://doi.acm.org/10.1145/2733373.2806365">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>czech_news_agency</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>10%</td><td>10</td><td>1</td><td>9</td><td>0</td><td>4</td><td>2</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html">A Multi-modal Graphical Model for Scene Analysis</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td>12%</td><td>8</td><td>1</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td><td>face_research_lab</td><td>Face Research Lab London</td><td><a href="papers/c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8.html">Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</a></td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td>Scientific reports</td><td>edu</td><td>University College London</td><td>51.52316070</td><td>-0.12820370</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><a href="http://doi.acm.org/10.1145/2676440.2676443">[pdf]</a></td><td></td><td>edu</td><td>University of 
Kentucky</td><td>38.03337420</td><td>-84.50177580</td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html">Indian Face Age Database : A Database for Face Recognition with Age Variation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>e3e44385a71a52fd483c58eb3cdf8d03960c0b70</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/e3e44385a71a52fd483c58eb3cdf8d03960c0b70.html">A Hierarchical Graphical Model for Recognizing Human Actions and Interactions in Video</a></td><td><a href="http://pdfs.semanticscholar.org/e3e4/4385a71a52fd483c58eb3cdf8d03960c0b70.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td><td>megaage</td><td>MegaAge</td><td><a href="papers/d80a3d1f3a438e02a6685e66ee908446766fefa9.html">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1708.09687.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html">Spoofing faces using makeup: An investigative study</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>43.61581310</td><td>7.06838000</td><td>20%</td><td>5</td><td>1</td><td>4</td><td>0</td><td>1</td><td>2</td></tr><tr><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/fb82681ac5d3487bd8e52dbb3d1fa220eeac855e.html">1 Network Notebook</a></td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>11</td><td>1</td><td>10</td><td>1</td><td>10</td><td>0</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1703.06283.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>8%</td><td>12</td><td>1</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html">ReSEED: social event dEtection dataset</a></td><td><a 
href="https://pub.uni-bielefeld.de/download/2663466/2686734">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>17%</td><td>6</td><td>1</td><td>5</td><td>1</td><td>1</td><td>1</td></tr><tr><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709.html">Learning to predict human behaviour in crowded scenes</a></td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>20%</td><td>5</td><td>1</td><td>4</td><td>1</td><td>5</td><td>0</td></tr><tr><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/9696ad8b164f5e10fcfe23aacf74bd6168aebb15.html">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1712.01443.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/f152b6ee251cca940dd853c54e6a7b78fbc6b235.html">Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</a></td><td><a href="{'url': 'http://dl.acm.org/citation.cfm?id=3232665', 'linkType': 'acm'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td><td>bjut_3d</td><td>BJUT-3D</td><td><a href="papers/1ed1a49534ad8dd00f81939449f6389cfbc25321.html">A novel face recognition method based on 3D face model</a></td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202">[pdf]</a></td><td>2007 IEEE International Conference on Robotics and Biomimetics (ROBIO)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>1</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4.html">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1805.07193.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html">FDDB: A Benchmark for Face Detection in Unconstrained Settings</a></td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of 
Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td><td>fei</td><td>FEI</td><td><a href="papers/b6b1b0632eb9d4ab1427278f5e5c46f97753c73d.html">Generalização cartográfica automatizada para um banco de dados cadastral</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html">A Multi - camera video data set for research on High - Definition surveillance</a></td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd.html">High-resolution comprehensive 3-D dynamic database for facial articulation analysis</a></td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>5</td><td>0</td><td>5</td><td>0</td><td>1</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/ad62c6e17bc39b4dec20d32f6ac667ae42d2c118.html">A Synchronization Ground Truth for the Jiku Mobile Video Dataset</a></td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html">Component-Based Face Recognition with 3D Morphable Models</a></td><td><a href="http://www.bheisele.com/avbpa2003.pdf">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition 
Workshop</td><td></td><td></td><td></td><td></td><td>0%</td><td>12</td><td>0</td><td>12</td><td>0</td><td>8</td><td>0</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html">Crowdsourcing facial expressions for affective-interaction</a></td><td><a href="https://doi.org/10.1016/j.cviu.2016.02.001">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html">Competitive affective gaming: winning with a smile</a></td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html">PETS 2017: Dataset and Challenge</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>8</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku</td><td>PKU</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>1</td><td>0</td></tr><tr><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/c866a2afc871910e3282fd9498dce4ab20f6a332.html">Surveillance Face Recognition Challenge</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.09691.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td><td>scface</td><td>SCface</td><td><a href="papers/f3b84a03985de3890b400b68e2a92c0a00afd9d0.html">Large Variability Surveillance Camera Face Database</a></td><td><span class="gray">[pdf]</a></td><td>2015 Seventh International Conference on Computational Intelligence, Modelling and Simulation (CIMSim)</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.04326.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>377f2b65e6a9300448bdccf678cde59449ecd337</td><td>ufdd</td><td>UFDD</td><td><a href="papers/377f2b65e6a9300448bdccf678cde59449ecd337.html">Pushing the Limits of Unconstrained Face Detection: a 
Challenge Dataset and Baseline Results</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.10275.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Johns Hopkins University</td><td>39.32905300</td><td>-76.61942500</td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td>umd_faces</td><td>UMD</td><td><a href="papers/447d8893a4bdc29fa1214e53499ffe67b28a6db5.html">Electronic Transport in Quantum Confined Systems</a></td><td><a href="http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html">Re-identification of pedestrians with variable occlusion and scale</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>10</td><td>0</td><td>10</td><td>2</td><td>6</td><td>0</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><a href="http://doi.acm.org/10.1145/2996913.2996997">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>77c81c13a110a341c140995bedb98101b9e84f7f</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/77c81c13a110a341c140995bedb98101b9e84f7f.html">WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td></td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html">WLFDB: Weakly Labeled Face Databases</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html">YawDD: a yawning detection dataset</a></td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>14</td><td>0</td><td>14</td><td>1</td><td>2</td><td>1</td></tr></table></body></html>
\ No newline at end of file +<!doctype html><html><head><meta charset='utf-8'><title>Coverage</title><link rel='stylesheet' href='reports.css'></head><body><h2>Coverage</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1212.0402.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>University of Central Florida</td><td>28.59899755</td><td>-81.19712501</td><td>54%</td><td>999</td><td>535</td><td>464</td><td>73</td><td>708</td><td>212</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html">Face detection, pose estimation, and landmark localization in the wild</a></td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of California, Irvine</td><td>33.64319010</td><td>-117.84016494</td><td>52%</td><td>999</td><td>521</td><td>478</td><td>59</td><td>607</td><td>273</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>put_face</td><td>Put Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a 
href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html">Attribute and simile classifiers for face verification</a></td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>51%</td><td>894</td><td>454</td><td>440</td><td>55</td><td>587</td><td>222</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>42%</td><td>999</td><td>422</td><td>577</td><td>80</td><td>538</td><td>295</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>42%</td><td>999</td><td>422</td><td>577</td><td>80</td><td>538</td><td>295</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td></td><td></td><td></td><td></td><td>41%</td><td>975</td><td>403</td><td>572</td><td>65</td><td>460</td><td>345</td></tr><tr><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td><td>a_pascal_yahoo</td><td>aPascal</td><td><a href="papers/2e384f057211426ac5922f1b33d2aa8df5d51f57.html">Describing objects by their attributes</a></td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>727</td><td>73</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html">Deep Face Recognition</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>621</td><td>156</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a 
href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf">[pdf]</a></td><td></td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>38%</td><td>999</td><td>380</td><td>619</td><td>75</td><td>555</td><td>252</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html">VQA: Visual Question Answering</a></td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>47%</td><td>731</td><td>344</td><td>387</td><td>47</td><td>628</td><td>4</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html">Deep Learning Face Attributes in the Wild</a></td><td><a href="http://arxiv.org/pdf/1411.7766v2.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>42%</td><td>808</td><td>340</td><td>468</td><td>69</td><td>666</td><td>50</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html">International Conference on Automatic Face and Gesture Recognition The CMU Pose , Illumination , and Expression ( PIE ) Database</a></td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>742</td><td>330</td><td>412</td><td>61</td><td>410</td><td>232</td></tr><tr><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td><td>voc</td><td>VOC</td><td><a href="papers/abe9f3b91fd26fa1b50cd685c0d20debfb372f73.html">The Pascal Visual Object Classes Challenge: A Retrospective</a></td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>999</td><td>315</td><td>684</td><td>75</td><td>698</td><td>6</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf">[pdf]</a></td><td></td><td>edu</td><td>Kyushu University</td><td>33.59914655</td><td>130.22359848</td><td>36%</td><td>848</td><td>308</td><td>540</td><td>56</td><td>413</td><td>255</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html">Nonparametric Object and Scene Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>999</td><td>305</td><td>694</td><td>93</td><td>670</td><td>9</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfw_p</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><a 
href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>53%</td><td>521</td><td>274</td><td>247</td><td>40</td><td>321</td><td>144</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html">Overview of the face recognition grand challenge</a></td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>253</td><td>746</td><td>110</td><td>572</td><td>64</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html">Face recognition in unconstrained videos with matched background similarity</a></td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Open University of Israel</td><td>32.77824165</td><td>34.99565673</td><td>50%</td><td>485</td><td>244</td><td>240</td><td>32</td><td>290</td><td>140</td></tr><tr><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td><td>sheffield</td><td>Sheffield Face</td><td><a href="papers/3607afdb204de9a5a9300ae98aa4635d9effcda2.html">Face Description with Local Binary Patterns: Application to Face Recognition</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>999</td><td>238</td><td>761</td><td>65</td><td>483</td><td>87</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html">Learning Face Representation from Scratch</a></td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>53%</td><td>436</td><td>233</td><td>203</td><td>32</td><td>284</td><td>115</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html">Poselets: Body part detectors 
trained using 3D human pose annotations</a></td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>707</td><td>223</td><td>484</td><td>62</td><td>482</td><td>18</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html">Histograms of oriented gradients for human detection</a></td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>22%</td><td>999</td><td>217</td><td>782</td><td>67</td><td>520</td><td>22</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html">Parameterisation of a Stochastic Model for Human Face Identification</a></td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>999</td><td>214</td><td>785</td><td>96</td><td>550</td><td>57</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td><td>kdef</td><td>KDEF</td><td><a href="papers/93884e46c49f7ae1c7c34046fbc28882f2bd6341.html">Gaze fixation and the neural circuitry of face processing in autism</a></td><td><a href="{'url': 'http://doi.org/10.1038/nn1421', 'linkType': 'nature'}">[pdf]</a></td><td>Nature Neuroscience</td><td></td><td></td><td></td><td></td><td>31%</td><td>608</td><td>190</td><td>418</td><td>92</td><td>463</td><td>0</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk03</td><td>CUHK03</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td></td><td></td><td></td><td></td><td>35%</td><td>512</td><td>180</td><td>332</td><td>29</td><td>323</td><td>4</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866', 'linkType': 'ieee'}">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>50%</td><td>356</td><td>179</td><td>177</td><td>21</td><td>299</td><td>3</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html">Interactive Facial Feature Localization</a></td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>52%</td><td>339</td><td>177</td><td>162</td><td>27</td><td>208</td><td>100</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html">Robust Face Landmark Estimation under Occlusion</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>55%</td><td>305</td><td>167</td><td>138</td><td>16</td><td>186</td><td>95</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>584</td><td>159</td><td>425</td><td>38</td><td>336</td><td>9</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>imm_face</td><td>IMM Face Dataset</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>muct</td><td>MUCT</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark 
localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html">Scalable Person Re-identification: A Benchmark</a></td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>38%</td><td>394</td><td>149</td><td>245</td><td>18</td><td>271</td><td>3</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html">Web-based database for facial expression analysis</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td>32%</td><td>440</td><td>142</td><td>298</td><td>44</td><td>258</td><td>82</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td>41%</td><td>323</td><td>131</td><td>192</td><td>29</td><td>221</td><td>25</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html">A 3D facial expression database for facial behavior research</a></td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>24%</td><td>555</td><td>131</td><td>424</td><td>47</td><td>283</td><td>48</td></tr><tr><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td><td>coco</td><td>COCO</td><td><a href="papers/696ca58d93f6404fea0fc75c62d1d7b378f47628.html">Microsoft COCO Captions: Data Collection and Evaluation Server</a></td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>46%</td><td>283</td><td>129</td><td>154</td><td>16</td><td>231</td><td>4</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>498</td><td>127</td><td>371</td><td>55</td><td>319</td><td>32</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a 
href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html">Presentation and validation of the Radboud Faces Database</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>28%</td><td>446</td><td>127</td><td>319</td><td>43</td><td>307</td><td>19</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces_news</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html">Names and faces in the news</a></td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.</td><td></td><td></td><td></td><td></td><td>41%</td><td>294</td><td>120</td><td>174</td><td>24</td><td>207</td><td>45</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and 
people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>48%</td><td>222</td><td>107</td><td>115</td><td>21</td><td>158</td><td>48</td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/28312c3a47c1be3a67365700744d3d6665b86f22.html">Face Recognition: A Literature Survey</a></td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>105</td><td>265</td><td>32</td><td>217</td><td>39</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr>
Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>berkeley_pose</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html">Describing people: A poselet-based approach to attribute classification</a></td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>43%</td><td>221</td><td>96</td><td>125</td><td>14</td><td>160</td><td>23</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>10%</td><td>999</td><td>96</td><td>355</td><td>26</td><td>252</td><td>5</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html">Recognition using visual phrases</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>41%</td><td>233</td><td>95</td><td>138</td><td>18</td><td>174</td><td>5</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>352</td><td>94</td><td>258</td><td>26</td><td>195</td><td>3</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html">SUN attribute database: Discovering, annotating, and recognizing scene 
attributes</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>32%</td><td>260</td><td>84</td><td>85</td><td>18</td><td>143</td><td>10</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html">Exploring Models and Data for Image Question Answering</a></td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>191</td><td>83</td><td>108</td><td>12</td><td>163</td><td>1</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>167</td><td>83</td><td>84</td><td>15</td><td>131</td><td>27</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html">Depth and Appearance for Mobile Scene Analysis</a></td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>25%</td><td>319</td><td>79</td><td>240</td><td>27</td><td>192</td><td>0</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html">WIDER FACE: A Face Detection Benchmark</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.06523.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>53%</td><td>148</td><td>78</td><td>70</td><td>16</td><td>107</td><td>34</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>51%</td><td>148</td><td>76</td><td>72</td><td>8</td><td>109</td><td>3</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>m2vts</td><td>m2vts</td><td><a 
href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw_a</td><td>LFW-a</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>42%</td><td>177</td><td>75</td><td>102</td><td>15</td><td>102</td><td>54</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html">Age and Gender Estimation of Unfiltered Faces</a></td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td></td><td></td><td></td><td></td><td>43%</td><td>168</td><td>72</td><td>96</td><td>7</td><td>89</td><td>53</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html">Understanding images of groups of people</a></td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>36%</td><td>202</td><td>72</td><td>130</td><td>12</td><td>126</td><td>24</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html">Learning effective human pose estimation from inaccurate annotation</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 
2011</td><td></td><td></td><td></td><td></td><td>40%</td><td>173</td><td>70</td><td>103</td><td>9</td><td>116</td><td>2</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk01</td><td>CUHK01</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>258</td><td>67</td><td>191</td><td>12</td><td>141</td><td>1</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>towncenter</td><td>TownCenter</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html">Stable Multi-Target Tracking in Real-Time Surveillance Video (Preprint)</a></td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>310</td><td>64</td><td>246</td><td>24</td><td>177</td><td>4</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>-35.27769990</td><td>149.11852700</td><td>33%</td><td>175</td><td>58</td><td>66</td><td>7</td><td>54</td><td>49</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html">Bosphorus Database for 3D Face Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>328</td><td>58</td><td>270</td><td>18</td><td>143</td><td>37</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html">Performance Measures and a Data Set for Multi-target, Multi-camera Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>136</td><td>58</td><td>78</td><td>6</td><td>107</td><td>0</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462', 'linkType': 'ieee'}">[pdf]</a></td><td>Face and Gesture 
2011</td><td></td><td></td><td></td><td></td><td>32%</td><td>184</td><td>58</td><td>126</td><td>23</td><td>112</td><td>23</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk02</td><td>CUHK02</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html">Locally Aligned Feature Transforms across Views</a></td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>24%</td><td>242</td><td>57</td><td>185</td><td>17</td><td>139</td><td>1</td></tr><tr><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2f5d44dc3e1b5955942133ff872ebd31716ec604.html">2D and 3D face recognition: A survey</a></td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>15%</td><td>389</td><td>57</td><td>332</td><td>28</td><td>198</td><td>17</td></tr><tr><td>a6e695ddd07aad719001c0fc1129328452385949</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/a6e695ddd07aad719001c0fc1129328452385949.html">The New Data and New Challenges in Multimedia Research</a></td><td><span class="gray">[pdf]</span></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>36%</td><td>160</td><td>57</td><td>103</td><td>11</td><td>105</td><td>4</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html">A data-driven approach to cleaning large face datasets</a></td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>46%</td><td>123</td><td>56</td><td>67</td><td>6</td><td>95</td><td>21</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontaneous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>36%</td><td>151</td><td>54</td><td>97</td><td>7</td><td>85</td><td>26</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html">On a large sequence-based human gait database</a></td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>148</td><td>54</td><td>94</td><td>16</td><td>98</td><td>0</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html">Appearance-based gaze estimation in the wild</a></td><td><a href="https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Max Planck Institute for 
Informatics</td><td>49.25795660</td><td>7.04577417</td><td>38%</td><td>138</td><td>52</td><td>86</td><td>3</td><td>94</td><td>7</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>146</td><td>49</td><td>97</td><td>6</td><td>96</td><td>0</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html">Person Re-identification by Video Ranking</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>25%</td><td>196</td><td>49</td><td>124</td><td>11</td><td>98</td><td>1</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf">[pdf]</a></td><td></td><td>edu</td><td>Tohoku University</td><td>38.25309450</td><td>140.87365930</td><td>53%</td><td>91</td><td>48</td><td>43</td><td>5</td><td>60</td><td>15</td></tr><tr><td>109df0e8e5969ddf01e073143e83599228a1163f</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/109df0e8e5969ddf01e073143e83599228a1163f.html">Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</a></td><td><a href="http://dl.acm.org/citation.cfm?id=2337184">[pdf]</a></td><td>2012 39th Annual International Symposium on Computer Architecture (ISCA)</td><td></td><td></td><td></td><td></td><td>25%</td><td>192</td><td>48</td><td>144</td><td>8</td><td>99</td><td>0</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td></td><td></td><td></td><td></td><td>38%</td><td>125</td><td>48</td><td>77</td><td>6</td><td>72</td><td>1</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html">Labeled Faces in the Wild: Updates and New Reporting Procedures</a></td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>41%</td><td>116</td><td>47</td><td>46</td><td>4</td><td>62</td><td>19</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390', 'linkType': 'ieee'}">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td>44%</td><td>102</td><td>45</td><td>54</td><td>6</td><td>62</td><td>28</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionpairs</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 
IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><a href="http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>University of Queensland</td><td>-27.49741805</td><td>153.01316956</td><td>30%</td><td>128</td><td>39</td><td>89</td><td>6</td><td>68</td><td>14</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html">Pedestrian Attribute Recognition At Far Distance</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>80</td><td>37</td><td>43</td><td>2</td><td>51</td><td>3</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html">Pruning training sets for learning of object categories</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308', 'linkType': 'ieee'}">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>58%</td><td>60</td><td>35</td><td>25</td><td>5</td><td>42</td><td>12</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 
'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/214c966d1f9c2a4b66f4535d9a0d4078e63a5867.html">Brainwash: A Data System for Feature Engineering</a></td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>57</td><td>34</td><td>23</td><td>2</td><td>50</td><td>0</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>77</td><td>34</td><td>43</td><td>4</td><td>57</td><td>1</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td>39%</td><td>83</td><td>32</td><td>51</td><td>2</td><td>62</td><td>3</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>72</td><td>32</td><td>40</td><td>7</td><td>54</td><td>8</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html">High Five: Recognising human interactions in TV shows</a></td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>91</td><td>31</td><td>60</td><td>11</td><td>64</td><td>1</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>68</td><td>30</td><td>38</td><td>8</td><td>49</td><td>7</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 
'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>60%</td><td>50</td><td>30</td><td>19</td><td>2</td><td>40</td><td>4</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>60%</td><td>48</td><td>29</td><td>19</td><td>2</td><td>45</td><td>2</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>47%</td><td>60</td><td>28</td><td>32</td><td>0</td><td>36</td><td>6</td></tr><tr><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td><td>gavab_db</td><td>Gavab</td><td><a href="papers/42505464808dfb446f521fc6ff2cfeffd4d68ff1.html">Expression invariant 3D face recognition with a Morphable Model</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376', 'linkType': 'ieee'}">[pdf]</a></td><td>2008 8th IEEE International Conference on Automatic Face & Gesture 
Recognition</td><td></td><td></td><td></td><td></td><td>29%</td><td>94</td><td>27</td><td>67</td><td>10</td><td>57</td><td>5</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>18%</td><td>151</td><td>27</td><td>124</td><td>9</td><td>78</td><td>2</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>73</td><td>26</td><td>47</td><td>6</td><td>39</td><td>16</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html">Recognize complex events from static images by fusing deep channels</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>58%</td><td>45</td><td>26</td><td>19</td><td>1</td><td>30</td><td>12</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html">Person Re-identification in the Wild</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Technology Sydney</td><td>-33.88096510</td><td>151.20107299</td><td>38%</td><td>65</td><td>25</td><td>40</td><td>1</td><td>46</td><td>0</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard 
University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html">Automatic 3D face authentication</a></td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>25%</td><td>95</td><td>24</td><td>71</td><td>8</td><td>60</td><td>2</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td>edu</td><td>Jacobs University</td><td>53.41291480</td><td>-2.96897915</td><td>44%</td><td>52</td><td>23</td><td>29</td><td>5</td><td>37</td><td>4</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html">Object Detection Combining Recognition and Segmentation</a></td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>23%</td><td>101</td><td>23</td><td>78</td><td>11</td><td>58</td><td>0</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><a href="http://doi.acm.org/10.1145/2072572.2072590">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>122</td><td>22</td><td>100</td><td>11</td><td>71</td><td>1</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB 
KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>24%</td><td>89</td><td>21</td><td>46</td><td>2</td><td>20</td><td>14</td></tr><tr><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/eb027969f9310e0ae941e2adee2d42cdf07d938c.html">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1710.08092.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>38%</td><td>56</td><td>21</td><td>35</td><td>6</td><td>50</td><td>3</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html">The MUG facial expression database</a></td><td><a href="http://ieeexplore.ieee.org/document/5617662/">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>40.62984145</td><td>22.95889350</td><td>28%</td><td>68</td><td>19</td><td>49</td><td>5</td><td>28</td><td>19</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>22%</td><td>88</td><td>19</td><td>43</td><td>4</td><td>42</td><td>2</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html">The intrinsic memorability of face photographs.</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Journal of experimental psychology. 
General</td><td></td><td></td><td></td><td></td><td>36%</td><td>47</td><td>17</td><td>30</td><td>3</td><td>33</td><td>1</td></tr><tr><td>69a68f9cf874c69e2232f47808016c2736b90c35</td><td>celeba_plus</td><td>CelebFaces+</td><td><a href="papers/69a68f9cf874c69e2232f47808016c2736b90c35.html">Learning Deep Representation for Imbalanced Classification</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>33%</td><td>51</td><td>17</td><td>34</td><td>1</td><td>39</td><td>2</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>39%</td><td>44</td><td>17</td><td>27</td><td>0</td><td>36</td><td>0</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html">Violent flows: Real-time detection of violent crowd behavior</a></td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>20%</td><td>83</td><td>17</td><td>66</td><td>6</td><td>42</td><td>2</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>CAFE</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>33</td><td>16</td><td>17</td><td>3</td><td>28</td><td>1</td></tr><tr><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/c34532fe6bfbd1e6df477c9ffdbb043b77e7804d.html">A 3D Morphable Eye Region Model for Gaze Estimation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>67%</td><td>24</td><td>16</td><td>8</td><td>0</td><td>18</td><td>4</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html">The CMU Face In Action (FIA) Database</a></td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>55</td><td>16</td><td>39</td><td>5</td><td>38</td><td>7</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a 
href="https://doi.org/10.1007/s11263-013-0672-6">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>1</td><td>18</td><td>1</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>20</td><td>0</td></tr><tr><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/a5a44a32a91474f00a3cda671a802e87c899fbb4.html">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1801.03150.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>25</td><td>0</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html">RONCHI AND PERONA: DESCRIBING COMMON HUMAN VISUAL ACTIONS IN IMAGES 1 Describing Common Human Visual Actions in Images</a></td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>26</td><td>14</td><td>12</td><td>0</td><td>25</td><td>0</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html">End-to-End Deep Learning for Person Search</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf', 'linkType': 's2'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>34%</td><td>41</td><td>14</td><td>27</td><td>2</td><td>27</td><td>0</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html">Personalizing Human Video Pose Estimation</a></td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>32</td><td>14</td><td>18</td><td>2</td><td>27</td><td>0</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a 
href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html">UMB-DB: A database of partially occluded 3D faces</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>29%</td><td>45</td><td>13</td><td>32</td><td>2</td><td>20</td><td>3</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>31%</td><td>39</td><td>12</td><td>27</td><td>2</td><td>27</td><td>7</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td></td><td></td><td></td><td></td><td>16%</td><td>75</td><td>12</td><td>63</td><td>6</td><td>25</td><td>8</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html">Fine-grained evaluation on face detection in the wild</a></td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Chinese Academy of Sciences</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>13</td><td>4</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>16%</td><td>77</td><td>12</td><td>39</td><td>3</td><td>32</td><td>0</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>16%</td><td>77</td><td>12</td><td>39</td><td>3</td><td>32</td><td>0</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html">Consistent Re-identification in a Camera Network</a></td><td><a 
href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>45</td><td>12</td><td>33</td><td>7</td><td>34</td><td>1</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td>21%</td><td>58</td><td>12</td><td>46</td><td>7</td><td>40</td><td>1</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>67%</td><td>18</td><td>12</td><td>6</td><td>0</td><td>16</td><td>0</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1705.00393.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>41%</td><td>27</td><td>11</td><td>16</td><td>2</td><td>22</td><td>4</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1705.00393.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>41%</td><td>27</td><td>11</td><td>16</td><td>2</td><td>22</td><td>4</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>0</td><td>18</td><td>0</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a 
href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>25%</td><td>40</td><td>10</td><td>30</td><td>0</td><td>20</td><td>6</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>15</td><td>3</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>11</td><td>2</td></tr><tr><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td><td>agedb</td><td>AgeDB</td><td><a href="papers/6dcf418c778f528b5792104760f1fbfe90c6dd6a.html">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>82%</td><td>11</td><td>9</td><td>2</td><td>0</td><td>10</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html">Fashion Landmark Detection in the Wild</a></td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong 
Kong</td><td>22.42031295</td><td>114.20788644</td><td>39%</td><td>23</td><td>9</td><td>8</td><td>0</td><td>15</td><td>0</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html">Fashion Landmark Detection in the Wild</a></td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>39%</td><td>23</td><td>9</td><td>8</td><td>0</td><td>15</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html">Automated Human Identification Using Ear Imaging</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>13%</td><td>70</td><td>9</td><td>61</td><td>6</td><td>28</td><td>1</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</span></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>10</td><td>4</td></tr><tr><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td><td>nudedetection</td><td>Nude Detection</td><td><a href="papers/7ace44190729927e5cb0dd5d363fcae966fe13f7.html">A bag-of-features approach based on Hue-SIFT descriptor for nude detection</a></td><td><a href="http://ieeexplore.ieee.org/document/7077625/">[pdf]</a></td><td>2009 17th European Signal Processing Conference</td><td></td><td></td><td></td><td></td><td>18%</td><td>51</td><td>9</td><td>42</td><td>1</td><td>18</td><td>0</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>20</td><td>8</td><td>12</td><td>2</td><td>16</td><td>0</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928', 'linkType': 'ieee'}">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and
Systems</td><td></td><td></td><td></td><td></td><td>25%</td><td>32</td><td>8</td><td>24</td><td>3</td><td>16</td><td>1</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>47%</td><td>17</td><td>8</td><td>9</td><td>1</td><td>12</td><td>1</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html">Ear recognition: More than a survey</a></td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td>29%</td><td>24</td><td>7</td><td>17</td><td>0</td><td>11</td><td>0</td></tr><tr><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td><td>camel</td><td>CAMEL</td><td><a href="papers/5801690199c1917fa58c35c3dead177c0b8f9f2d.html">Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf">[pdf]</a></td><td>Remote Sensing</td><td></td><td></td><td></td><td></td><td>37%</td><td>19</td><td>7</td><td>12</td><td>1</td><td>16</td><td>0</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td></td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</span></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td>22%</td><td>32</td><td>7</td><td>25</td><td>2</td><td>17</td><td>0</td></tr><tr><td>ec792ad2433b6579f2566c932ee414111e194537</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/ec792ad2433b6579f2566c932ee414111e194537.html">Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1711.08565.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>50%</td><td>14</td><td>7</td><td>7</td><td>1</td><td>11</td><td>0</td></tr><tr><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td><td>pornodb</td><td>Pornography DB</td><td><a href="papers/b92a1ed9622b8268ae3ac9090e25789fc41cc9b8.html">Pooling in image representation: The visual codeword point of view</a></td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>9%</td><td>77</td><td>7</td><td>70</td><td>7</td><td>43</td><td>2</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html">Three-dimensional face recognition: an eigensurface approach</a></td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004.
ICIP '04.</td><td></td><td></td><td></td><td></td><td>19%</td><td>36</td><td>7</td><td>29</td><td>4</td><td>25</td><td>1</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html">Recognizing disguised faces</a></td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>24</td><td>6</td><td>18</td><td>0</td><td>16</td><td>1</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html">IARPA Janus Benchmark-B Face Dataset</a></td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>24%</td><td>25</td><td>6</td><td>19</td><td>6</td><td>21</td><td>3</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.02459.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td>edu</td><td>South China University of Technology</td><td>23.05020420</td><td>113.39880323</td><td>43%</td><td>14</td><td>6</td><td>8</td><td>3</td><td>5</td><td>7</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>24%</td><td>21</td><td>5</td><td>16</td><td>3</td><td>11</td><td>1</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><a 
href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>45.50397610</td><td>-73.57496870</td><td>28%</td><td>18</td><td>5</td><td>6</td><td>0</td><td>7</td><td>4</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><a href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>45.50397610</td><td>-73.57496870</td><td>28%</td><td>18</td><td>5</td><td>6</td><td>0</td><td>7</td><td>4</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html">Re-identify people in wide area camera network</a></td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>9%</td><td>55</td><td>5</td><td>50</td><td>2</td><td>35</td><td>0</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1605.09653.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE transactions on pattern analysis and machine intelligence</td><td></td><td></td><td></td><td></td><td>27%</td><td>15</td><td>4</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td><td>buhmap_db</td><td>BUHMAP-DB </td><td><a href="papers/014b8df0180f33b9fea98f34ae611c6447d761d2.html">Facial feature tracking and expression recognition for sign language</a></td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf">[pdf]</a></td><td>2008 23rd International Symposium on Computer and Information Sciences</td><td></td><td></td><td></td><td></td><td>16%</td><td>25</td><td>4</td><td>21</td><td>1</td><td>10</td><td>2</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html">Adaptive Image Sampling and Windows Classification for On–board Pedestrian Detection</a></td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>44</td><td>4</td><td>40</td><td>1</td><td>21</td><td>0</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a 
href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>14%</td><td>29</td><td>4</td><td>25</td><td>3</td><td>17</td><td>0</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><a href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Michigan State University</td><td>42.71856800</td><td>-84.47791571</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>5</td><td>0</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html">Faces in Places: compound query retrieval</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>0</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html">ChaLearn looking at people: A review of events and resources</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1701.02664.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td>30%</td><td>10</td><td>3</td><td>7</td><td>1</td><td>6</td><td>0</td></tr><tr><td>a5acda0e8c0937bfed013e6382da127103e41395</td><td>disfa</td><td>DISFA</td><td><a href="papers/a5acda0e8c0937bfed013e6382da127103e41395.html">Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>38%</td><td>8</td><td>3</td><td>5</td><td>1</td><td>5</td><td>0</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217', 'linkType': 'ieee'}">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td>33%</td><td>9</td><td>3</td><td>6</td><td>2</td><td>9</td><td>0</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a 
href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html">Large scale unconstrained open set face database</a></td><td><a href="http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of Colorado at Colorado Springs</td><td>38.89646790</td><td>-104.80505940</td><td>60%</td><td>5</td><td>3</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html">USED: a large-scale social event detection dataset</a></td><td><a href="http://doi.acm.org/10.1145/2910017.2910624">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html">VADANA: A dense dataset for facial image analysis</a></td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>19%</td><td>16</td><td>3</td><td>13</td><td>0</td><td>6</td><td>6</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf">[pdf]</a></td><td></td><td>edu</td><td>California Institute of Technology</td><td>34.13710185</td><td>-118.12527487</td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>6</td><td>1</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html">Face swapping: automatically replacing faces in photographs</a></td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td>ACM Trans. Graph.</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html">Face swapping: automatically replacing faces in photographs</a></td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td>ACM Trans. 
Graph.</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html">Visual Kinship Recognition of Families in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>67%</td><td>3</td><td>2</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/6dbe8e5121c534339d6e41f8683e85f87e6abf81.html">Clothing Cosegmentation for Shopping Images With Cluttered Background</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>33%</td><td>6</td><td>2</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td><td>ilids_vid_reid</td><td>iLIDS-VID</td><td><a href="papers/99eb4cea0d9bc9fe777a5c5172f8638a37a7f262.html">Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</a></td><td><a href="https://doi.org/10.1109/LSP.2016.2574323">[pdf]</a></td><td>IEEE Signal Processing Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>4</td><td>0</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html">Large Age-Gap face verification by feature injection in deep networks</a></td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>2</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market1203</td><td>Market 1203</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html">Investigating Open-World Person Re-identification Using a Drone</a></td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>3</td><td>0</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a
href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html">How to Take a Good Selfie?</a></td><td><a href="http://doi.acm.org/10.1145/2733373.2806365">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>czech_news_agency</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>10%</td><td>10</td><td>1</td><td>9</td><td>0</td><td>4</td><td>2</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html">A Multi-modal Graphical Model for Scene Analysis</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td>12%</td><td>8</td><td>1</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td><td>face_research_lab</td><td>Face Research Lab London</td><td><a href="papers/c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8.html">Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</a></td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td>Scientific reports</td><td>edu</td><td>University College London</td><td>51.52316070</td><td>-0.12820370</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><a href="http://doi.acm.org/10.1145/2676440.2676443">[pdf]</a></td><td></td><td>edu</td><td>University of Kentucky</td><td>38.03337420</td><td>-84.50177580</td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html">Indian Face Age Database : A Database for Face Recognition with Age Variation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td><td>megaage</td><td>MegaAge</td><td><a href="papers/d80a3d1f3a438e02a6685e66ee908446766fefa9.html">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1708.09687.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese University of Hong 
Kong</td><td>22.42031295</td><td>114.20788644</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html">Spoofing faces using makeup: An investigative study</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>43.61581310</td><td>7.06838000</td><td>20%</td><td>5</td><td>1</td><td>4</td><td>0</td><td>1</td><td>2</td></tr><tr><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/fb82681ac5d3487bd8e52dbb3d1fa220eeac855e.html">1 Network Notebook</a></td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>11</td><td>1</td><td>10</td><td>1</td><td>10</td><td>0</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1703.06283.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>8%</td><td>12</td><td>1</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html">ReSEED: social event dEtection dataset</a></td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>17%</td><td>6</td><td>1</td><td>5</td><td>1</td><td>1</td><td>1</td></tr><tr><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709.html">Learning to predict human behaviour in crowded scenes</a></td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>20%</td><td>5</td><td>1</td><td>4</td><td>1</td><td>5</td><td>0</td></tr><tr><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/9696ad8b164f5e10fcfe23aacf74bd6168aebb15.html">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1712.01443.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/f152b6ee251cca940dd853c54e6a7b78fbc6b235.html">Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</a></td><td><a href="{'url': 'http://dl.acm.org/citation.cfm?id=3232665', 'linkType': 
'acm'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td><td>bjut_3d</td><td>BJUT-3D</td><td><a href="papers/1ed1a49534ad8dd00f81939449f6389cfbc25321.html">A novel face recognition method based on 3D face model</a></td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202">[pdf]</a></td><td>2007 IEEE International Conference on Robotics and Biomimetics (ROBIO)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>1</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4.html">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1805.07193.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html">FDDB: A Benchmark for Face Detection in Unconstrained Settings</a></td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td><td>fei</td><td>FEI</td><td><a href="papers/b6b1b0632eb9d4ab1427278f5e5c46f97753c73d.html">Generalização cartográfica automatizada para um banco de dados cadastral</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html">A Multi - camera video data set for research on High - Definition surveillance</a></td><td><a 
href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html">A Multi - camera video data set for research on High - Definition surveillance</a></td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd.html">High-resolution comprehensive 3-D dynamic database for facial articulation analysis</a></td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>5</td><td>0</td><td>5</td><td>0</td><td>1</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/ad62c6e17bc39b4dec20d32f6ac667ae42d2c118.html">A Synchronization Ground Truth for the Jiku Mobile Video Dataset</a></td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html">Component-Based Face Recognition with 3D Morphable Models</a></td><td><a href="http://www.bheisele.com/avbpa2003.pdf">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td>0%</td><td>12</td><td>0</td><td>12</td><td>0</td><td>8</td><td>0</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html">Competitive affective gaming: winning with a smile</a></td><td><a 
href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html">Competitive affective gaming: winning with a smile</a></td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html">PETS 2017: Dataset and Challenge</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>8</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku</td><td>PKU</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>1</td><td>0</td></tr><tr><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/c866a2afc871910e3282fd9498dce4ab20f6a332.html">Surveillance Face Recognition Challenge</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.09691.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td><td>scface</td><td>SCface</td><td><a href="papers/f3b84a03985de3890b400b68e2a92c0a00afd9d0.html">Large Variability Surveillance Camera Face Database</a></td><td><span class="gray">[pdf]</a></td><td>2015 Seventh International Conference on Computational Intelligence, Modelling and Simulation (CIMSim)</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.04326.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>377f2b65e6a9300448bdccf678cde59449ecd337</td><td>ufdd</td><td>UFDD</td><td><a href="papers/377f2b65e6a9300448bdccf678cde59449ecd337.html">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.10275.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Johns Hopkins University</td><td>39.32905300</td><td>-76.61942500</td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td>umd_faces</td><td>UMD</td><td><a 
href="papers/447d8893a4bdc29fa1214e53499ffe67b28a6db5.html">Electronic Transport in Quantum Confined Systems</a></td><td><a href="http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td>umd_faces</td><td>UMD</td><td><a href="papers/447d8893a4bdc29fa1214e53499ffe67b28a6db5.html">Electronic Transport in Quantum Confined Systems</a></td><td><a href="http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html">Re-identification of pedestrians with variable occlusion and scale</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>10</td><td>0</td><td>10</td><td>2</td><td>6</td><td>0</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><a href="http://doi.acm.org/10.1145/2996913.2996997">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>77c81c13a110a341c140995bedb98101b9e84f7f</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/77c81c13a110a341c140995bedb98101b9e84f7f.html">WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td></td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html">WLFDB: Weakly Labeled Face Databases</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html">YawDD: a yawning detection dataset</a></td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>14</td><td>0</td><td>14</td><td>1</td><td>2</td><td>1</td></tr></table></body></html>
\ No newline at end of file
diff --git a/scraper/reports/report_index.html b/scraper/reports/report_index.html
index ea6b5111..933f2953 100644
--- a/scraper/reports/report_index.html
+++ b/scraper/reports/report_index.html
@@ -1 +1 @@
-<!doctype html><html><head><meta charset='utf-8'><title>All Papers</title><link rel='stylesheet' href='reports.css'></head><body><h2>All Papers</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/fb82681ac5d3487bd8e52dbb3d1fa220eeac855e.html">1 Network Notebook</a></td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>11</td><td>1</td><td>10</td><td>1</td><td>10</td><td>0</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866', 'linkType': 'ieee'}">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>50%</td><td>356</td><td>179</td><td>177</td><td>21</td><td>299</td><td>3</td></tr><tr><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2f5d44dc3e1b5955942133ff872ebd31716ec604.html">2D and 3D face recognition: A survey</a></td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>15%</td><td>389</td><td>57</td><td>332</td><td>28</td><td>198</td><td>17</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="{'url': 'http://doi.org/10.1016/j.imavis.2016.01.002', 'linkType': 'doi'}">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>45%</td><td>114</td><td>51</td><td>63</td><td>10</td><td>70</td><td>31</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>56%</td><td>285</td><td>159</td><td>125</td><td>28</td><td>188</td><td>82</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><a
href="http://doi.acm.org/10.1145/2072572.2072590">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>122</td><td>22</td><td>100</td><td>11</td><td>71</td><td>1</td></tr><tr><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/9696ad8b164f5e10fcfe23aacf74bd6168aebb15.html">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1712.01443.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>0</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>31%</td><td>39</td><td>12</td><td>27</td><td>2</td><td>27</td><td>7</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td>41%</td><td>323</td><td>131</td><td>192</td><td>29</td><td>221</td><td>25</td></tr><tr><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/c34532fe6bfbd1e6df477c9ffdbb043b77e7804d.html">A 3D Morphable Eye Region Model for Gaze Estimation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>67%</td><td>24</td><td>16</td><td>8</td><td>0</td><td>18</td><td>4</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html">A 3D facial expression database for facial behavior research</a></td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>24%</td><td>555</td><td>131</td><td>424</td><td>47</td><td>283</td><td>48</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td>21%</td><td>58</td><td>12</td><td>46</td><td>7</td><td>40</td><td>1</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html">A FACS valid 3D dynamic action unit database with applications to 3D 
dynamic morphable facial modeling</a></td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td>edu</td><td>Jacobs University</td><td>53.41291480</td><td>-2.96897915</td><td>44%</td><td>52</td><td>23</td><td>29</td><td>5</td><td>37</td><td>4</td></tr><tr><td>e3e44385a71a52fd483c58eb3cdf8d03960c0b70</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/e3e44385a71a52fd483c58eb3cdf8d03960c0b70.html">A Hierarchical Graphical Model for Recognizing Human Actions and Interactions in Video</a></td><td><a href="http://pdfs.semanticscholar.org/e3e4/4385a71a52fd483c58eb3cdf8d03960c0b70.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html">A Multi - camera video data set for research on High - Definition surveillance</a></td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html">A Multi-modal Graphical Model for Scene Analysis</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td>12%</td><td>8</td><td>1</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>0</td><td>18</td><td>0</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.434">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>32%</td><td>85</td><td>27</td><td>58</td><td>9</td><td>51</td><td>0</td></tr><tr><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/ad62c6e17bc39b4dec20d32f6ac667ae42d2c118.html">A Synchronization Ground Truth for the Jiku 
Mobile Video Dataset</a></td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</a></td><td><a href="https://arxiv.org/pdf/1605.09653.pdf">[pdf]</a></td><td>IEEE transactions on pattern analysis and machine intelligence</td><td></td><td></td><td></td><td></td><td>27%</td><td>15</td><td>4</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td><td>nudedetection</td><td>Nude Detection</td><td><a href="papers/7ace44190729927e5cb0dd5d363fcae966fe13f7.html">A bag-of-features approach based on Hue-SIFT descriptor for nude detection</a></td><td><a href="http://ieeexplore.ieee.org/document/7077625/">[pdf]</a></td><td>2009 17th European Signal Processing Conference</td><td></td><td></td><td></td><td></td><td>18%</td><td>51</td><td>9</td><td>42</td><td>1</td><td>18</td><td>0</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html">A data-driven approach to cleaning large face datasets</a></td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>46%</td><td>123</td><td>56</td><td>67</td><td>6</td><td>95</td><td>21</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontaneous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>36%</td><td>151</td><td>54</td><td>97</td><td>7</td><td>85</td><td>26</td></tr><tr><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td><td>bjut_3d</td><td>BJUT-3D</td><td><a href="papers/1ed1a49534ad8dd00f81939449f6389cfbc25321.html">A novel face recognition method based on 3D face model</a></td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202">[pdf]</a></td><td>2007 IEEE International Conference on Robotics and Biomimetics (ROBIO)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>1</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>47%</td><td>15</td><td>7</td><td>8</td><td>1</td><td>10</td><td>3</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a
href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>251</td><td>748</td><td>110</td><td>509</td><td>113</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html">Adaptive Image Sampling and Windows Classification for On–board Pedestrian Detection</a></td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>44</td><td>4</td><td>40</td><td>1</td><td>21</td><td>0</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>73</td><td>26</td><td>47</td><td>6</td><td>39</td><td>16</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html">Age and Gender Estimation of Unfiltered Faces</a></td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td></td><td></td><td></td><td></td><td>43%</td><td>168</td><td>72</td><td>96</td><td>7</td><td>89</td><td>53</td></tr><tr><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td><td>agedb</td><td>AgeDB</td><td><a href="papers/6dcf418c778f528b5792104760f1fbfe90c6dd6a.html">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>82%</td><td>11</td><td>9</td><td>2</td><td>0</td><td>10</td><td>0</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>imm_face</td><td>IMM Face Dataset</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV 
Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>muct</td><td>MUCT</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>22%</td><td>88</td><td>19</td><td>43</td><td>4</td><td>42</td><td>2</td></tr><tr><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td><td>face_research_lab</td><td>Face Research Lab London</td><td><a href="papers/c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8.html">Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</a></td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td>Scientific reports</td><td>edu</td><td>University College London</td><td>51.52316070</td><td>-0.12820370</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html">Appearance-based gaze estimation in the wild</a></td><td><a href="https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Max Planck Institute for Informatics</td><td>49.25795660</td><td>7.04577417</td><td>38%</td><td>138</td><td>52</td><td>86</td><td>3</td><td>94</td><td>7</td></tr><tr><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td><td>camel</td><td>CAMEL</td><td><a href="papers/5801690199c1917fa58c35c3dead177c0b8f9f2d.html">Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf">[pdf]</a></td><td>Remote Sensing</td><td></td><td></td><td></td><td></td><td>37%</td><td>19</td><td>7</td><td>12</td><td>1</td><td>16</td><td>0</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html">Attribute and simile classifiers for face verification</a></td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>51%</td><td>894</td><td>454</td><td>440</td><td>55</td><td>587</td><td>222</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Delhi Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html">Automated Human
Identification Using Ear Imaging</a></td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>13%</td><td>70</td><td>9</td><td>61</td><td>6</td><td>28</td><td>1</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html">Automatic 3D face authentication</a></td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>25%</td><td>95</td><td>24</td><td>71</td><td>8</td><td>60</td><td>2</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="https://doi.org/10.1007/s11263-013-0672-6">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>1</td><td>18</td><td>1</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>60%</td><td>50</td><td>30</td><td>19</td><td>2</td><td>40</td><td>4</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html">Bosphorus Database for 3D Face Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>328</td><td>58</td><td>270</td><td>18</td><td>143</td><td>37</td></tr><tr><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/214c966d1f9c2a4b66f4535d9a0d4078e63a5867.html">Brainwash: A Data System for Feature Engineering</a></td><td><a
href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>57</td><td>34</td><td>23</td><td>2</td><td>50</td><td>0</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html">ChaLearn looking at people: A review of events and resources</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1701.02664.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td>30%</td><td>10</td><td>3</td><td>7</td><td>1</td><td>6</td><td>0</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>47%</td><td>60</td><td>28</td><td>32</td><td>0</td><td>36</td><td>6</td></tr><tr><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/6dbe8e5121c534339d6e41f8683e85f87e6abf81.html">Clothing Cosegmentation for Shopping Images With Cluttered Background</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>33%</td><td>6</td><td>2</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for 
Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf">[pdf]</a></td><td></td><td>edu</td><td>Kyushu University</td><td>33.59914655</td><td>130.22359848</td><td>36%</td><td>848</td><td>308</td><td>540</td><td>56</td><td>413</td><td>255</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>-35.27769990</td><td>149.11852700</td><td>33%</td><td>175</td><td>58</td><td>66</td><td>7</td><td>54</td><td>49</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html">Competitive affective gaming: winning with a smile</a></td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html">Component-Based Face Recognition with 3D Morphable Models</a></td><td><a href="http://www.bheisele.com/avbpa2003.pdf">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td>0%</td><td>12</td><td>0</td><td>12</td><td>0</td><td>8</td><td>0</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf">[pdf]</a></td><td></td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>38%</td><td>999</td><td>380</td><td>619</td><td>75</td><td>555</td><td>252</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html">Consistent Re-identification in a Camera Network</a></td><td><a
href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>45</td><td>12</td><td>33</td><td>7</td><td>34</td><td>1</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html">Crowdsourcing facial expressions for affective-interaction</a></td><td><a href="https://doi.org/10.1016/j.cviu.2016.02.001">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390', 'linkType': 'ieee'}">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td>44%</td><td>102</td><td>45</td><td>54</td><td>6</td><td>62</td><td>28</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="https://doi.org/10.1007/s11263-016-0940-3">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>133</td><td>59</td><td>74</td><td>14</td><td>90</td><td>28</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html">Deep Face Recognition</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>621</td><td>156</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html">Deep Learning Face Attributes in the Wild</a></td><td><a href="http://arxiv.org/pdf/1411.7766v2.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong 
Kong</td><td>22.42031295</td><td>114.20788644</td><td>42%</td><td>808</td><td>340</td><td>468</td><td>69</td><td>666</td><td>50</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>47%</td><td>150</td><td>71</td><td>79</td><td>4</td><td>111</td><td>8</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk03</td><td>CUHK03</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>35%</td><td>512</td><td>180</td><td>332</td><td>29</td><td>323</td><td>4</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html">Depth and Appearance for Mobile Scene Analysis</a></td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>25%</td><td>319</td><td>79</td><td>240</td><td>27</td><td>192</td><td>0</td></tr><tr><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td><td>a_pascal_yahoo</td><td>aPascal</td><td><a href="papers/2e384f057211426ac5922f1b33d2aa8df5d51f57.html">Describing objects by their attributes</a></td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>727</td><td>73</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>berkeley_pose</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html">Describing people: A poselet-based approach to attribute classification</a></td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>43%</td><td>221</td><td>96</td><td>125</td><td>14</td><td>160</td><td>23</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf">[pdf]</a></td><td></td><td>edu</td><td>California Institute of Technology</td><td>34.13710185</td><td>-118.12527487</td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>6</td><td>1</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html">Ear recognition: More than a survey</a></td><td><a
href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td>29%</td><td>24</td><td>7</td><td>17</td><td>0</td><td>11</td><td>0</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw_a</td><td>LFW-a</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>42%</td><td>177</td><td>75</td><td>102</td><td>15</td><td>102</td><td>54</td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td>umd_faces</td><td>UMD</td><td><a href="papers/447d8893a4bdc29fa1214e53499ffe67b28a6db5.html">Electronic Transport in Quantum Confined Systems</a></td><td><a href="http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>72</td><td>32</td><td>40</td><td>7</td><td>54</td><td>8</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html">End-to-End Deep Learning for Person Search</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf', 'linkType': 's2'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>34%</td><td>41</td><td>14</td><td>27</td><td>2</td><td>27</td><td>0</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>584</td><td>159</td><td>425</td><td>38</td><td>336</td><td>9</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="http://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td>20%</td><td>586</td><td>119</td><td>467</td><td>48</td><td>336</td><td>3</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><a href="https://arxiv.org/pdf/1703.06283.pdf">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>8%</td><td>12</td><td>1</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html">Exploring Models and Data for Image Question Answering</a></td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>191</td><td>83</td><td>108</td><td>12</td><td>163</td><td>1</td></tr><tr><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td><td>gavab_db</td><td>Gavab</td><td><a href="papers/42505464808dfb446f521fc6ff2cfeffd4d68ff1.html">Expression invariant 3D face recognition with a Morphable Model</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376">[pdf]</a></td><td>2008 8th IEEE International Conference on Automatic Face & Gesture Recognition</td><td></td><td></td><td></td><td></td><td>29%</td><td>94</td><td>27</td><td>67</td><td>10</td><td>57</td><td>5</td></tr><tr><td>a5acda0e8c0937bfed013e6382da127103e41395</td><td>disfa</td><td>DISFA</td><td><a href="papers/a5acda0e8c0937bfed013e6382da127103e41395.html">Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>38%</td><td>8</td><td>3</td><td>5</td><td>1</td><td>5</td><td>0</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html">FDDB: A Benchmark for Face Detection in Unconstrained Settings</a></td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td><td>sheffield</td><td>Sheffield Face</td><td><a href="papers/3607afdb204de9a5a9300ae98aa4635d9effcda2.html">Face Description with Local Binary Patterns: Application to Face Recognition</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>999</td><td>238</td><td>761</td><td>65</td><td>483</td><td>87</td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/28312c3a47c1be3a67365700744d3d6665b86f22.html">Face Recognition: A Literature Survey</a></td><td><a
href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>105</td><td>265</td><td>32</td><td>217</td><td>39</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html">Face detection, pose estimation, and landmark localization in the wild</a></td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of California, Irvine</td><td>33.64319010</td><td>-117.84016494</td><td>52%</td><td>999</td><td>521</td><td>478</td><td>59</td><td>607</td><td>273</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html">Face recognition in unconstrained videos with matched background similarity</a></td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Open University of Israel</td><td>32.77824165</td><td>34.99565673</td><td>50%</td><td>485</td><td>244</td><td>240</td><td>32</td><td>290</td><td>140</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html">Face swapping: automatically replacing faces in photographs</a></td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td>ACM Trans. Graph.</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>48%</td><td>218</td><td>105</td><td>113</td><td>17</td><td>146</td><td>52</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html">Faces in Places: compound query retrieval</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>0</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial Landmark Detection by Deep Multi-task 
Learning</a></td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td><td>buhmap_db</td><td>BUHMAP-DB </td><td><a href="papers/014b8df0180f33b9fea98f34ae611c6447d761d2.html">Facial feature tracking and expression recognition for sign language</a></td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf">[pdf]</a></td><td>2008 23rd International Symposium on Computer and Information Sciences</td><td></td><td></td><td></td><td></td><td>16%</td><td>25</td><td>4</td><td>21</td><td>1</td><td>10</td><td>2</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html">Fashion Landmark Detection in the Wild</a></td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>39%</td><td>23</td><td>9</td><td>8</td><td>0</td><td>15</td><td>0</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html">Fine-grained evaluation on face detection in the wild</a></td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Chinese Academy of Sciences</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>13</td><td>4</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>51%</td><td>148</td><td>76</td><td>72</td><td>8</td><td>109</td><td>3</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>47%</td><td>17</td><td>8</td><td>9</td><td>1</td><td>12</td><td>1</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>social_relation</td><td>Social 
Relation</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>42%</td><td>999</td><td>422</td><td>577</td><td>80</td><td>538</td><td>295</td></tr><tr><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td><td>kdef</td><td>KDEF</td><td><a href="papers/93884e46c49f7ae1c7c34046fbc28882f2bd6341.html">Gaze fixation and the neural circuitry of face processing in autism</a></td><td><a href="http://doi.org/10.1038/nn1421">[pdf]</a></td><td>Nature Neuroscience</td><td></td><td></td><td></td><td></td><td>31%</td><td>608</td><td>190</td><td>418</td><td>92</td><td>463</td><td>0</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html">Genealogical face recognition based on UB KinFace database</a></td><td><a href="https://doi.org/10.1109/CVPRW.2011.5981801">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>42.93362780</td><td>-78.88394479</td><td>13%</td><td>30</td><td>4</td><td>26</td><td>1</td><td>9</td><td>5</td></tr><tr><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td><td>fei</td><td>FEI</td><td><a href="papers/b6b1b0632eb9d4ab1427278f5e5c46f97753c73d.html">Generalização cartográfica automatizada para um banco de dados cadastral</a></td><td><a href="https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html">Generic object recognition with boosting</a></td><td><a href="http://www.emt.tu-graz.ac.at/~pinz/onlinepapers/Reprint_Vol_28_No_3_2006.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>286</td><td>69</td><td>217</td><td>16</td><td>189</td><td>0</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><a href="http://doi.acm.org/10.1145/2676440.2676443">[pdf]</a></td><td></td><td>edu</td><td>University of Kentucky</td><td>38.03337420</td><td>-84.50177580</td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html">High Five: Recognising human interactions in TV shows</a></td><td><a
href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>91</td><td>31</td><td>60</td><td>11</td><td>64</td><td>1</td></tr><tr><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd.html">High-resolution comprehensive 3-D dynamic database for facial articulation analysis</a></td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>5</td><td>0</td><td>5</td><td>0</td><td>1</td><td>0</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf">[pdf]</a></td><td></td><td>edu</td><td>Tohoku University</td><td>38.25309450</td><td>140.87365930</td><td>53%</td><td>91</td><td>48</td><td>43</td><td>5</td><td>60</td><td>15</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html">Histograms of oriented gradients for human detection</a></td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>22%</td><td>999</td><td>217</td><td>782</td><td>67</td><td>520</td><td>22</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html">How to Take a Good Selfie?</a></td><td><a href="http://doi.acm.org/10.1145/2733373.2806365">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>67%</td><td>18</td><td>12</td><td>6</td><td>0</td><td>16</td><td>0</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk01</td><td>CUHK01</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>258</td><td>67</td><td>191</td><td>12</td><td>141</td><td>1</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html">IARPA Janus Benchmark - C: Face Dataset and 
Protocol</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td>33%</td><td>9</td><td>3</td><td>6</td><td>2</td><td>9</td><td>0</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html">IARPA Janus Benchmark-B Face Dataset</a></td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>24%</td><td>25</td><td>6</td><td>19</td><td>6</td><td>21</td><td>3</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td></td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</span></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td>22%</td><td>32</td><td>7</td><td>25</td><td>2</td><td>17</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html">Indian Face Age Database: A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</span></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>10</td><td>4</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html">Interactive Facial Feature Localization</a></td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>52%</td><td>339</td><td>177</td><td>162</td><td>27</td><td>208</td><td>100</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html">International Conference on Automatic Face and Gesture Recognition The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>742</td><td>330</td><td>412</td><td>61</td><td>410</td><td>232</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html">Investigating Open-World
Person Re-identification Using a Drone</a></td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>3</td><td>0</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><a href="https://doi.org/10.1109/TIFS.2014.2361479">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>34.22498270</td><td>-77.86907744</td><td>38%</td><td>13</td><td>5</td><td>3</td><td>0</td><td>3</td><td>3</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html">Iranian Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</span></td><td>2007 International Conference on Machine Vision</td><td></td><td></td><td></td><td></td><td>20%</td><td>20</td><td>4</td><td>16</td><td>2</td><td>11</td><td>3</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html">Is the eye region more reliable than the face?
A preliminary study of face-based recognition on a transgender dataset</a></td><td><a href="https://doi.org/10.1109/BTAS.2013.6712710">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of North Carolina Wilmington</td><td>34.23755810</td><td>-77.92701290</td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>2</td><td>3</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td></td><td></td><td></td><td></td><td>16%</td><td>75</td><td>12</td><td>63</td><td>6</td><td>25</td><td>8</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html">Kinship Verification through Transfer Learning</a></td><td><a href="http://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>14%</td><td>66</td><td>9</td><td>39</td><td>2</td><td>18</td><td>5</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html">Labeled Faces in the Wild: Updates and New Reporting Procedures</a></td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>41%</td><td>116</td><td>47</td><td>46</td><td>4</td><td>62</td><td>19</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a
href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>put_face</td><td>Put Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html">Labeled Faces in the Wild: A Survey</a></td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>99</td><td>29</td><td>70</td><td>9</td><td>63</td><td>12</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html">Large Age-Gap face verification by feature injection in deep networks</a></td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>2</td></tr><tr><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td><td>scface</td><td>SCface</td><td><a href="papers/f3b84a03985de3890b400b68e2a92c0a00afd9d0.html">Large Variability Surveillance Camera Face Database</a></td><td><span class="gray">[pdf]</span></td><td>2015 Seventh International Conference on Computational Intelligence, Modelling and Simulation (CIMSim)</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html">Large scale unconstrained open set face database</a></td><td><a href="http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of Colorado at Colorado Springs</td><td>38.89646790</td><td>-104.80505940</td><td>60%</td><td>5</td><td>3</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation
for Face Alignment with Auxiliary Attributes</a></td><td><a href="https://arxiv.org/pdf/1408.3967.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>69a68f9cf874c69e2232f47808016c2736b90c35</td><td>celeba_plus</td><td>CelebFaces+</td><td><a href="papers/69a68f9cf874c69e2232f47808016c2736b90c35.html">Learning Deep Representation for Imbalanced Classification</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>33%</td><td>51</td><td>17</td><td>34</td><td>1</td><td>39</td><td>2</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html">Learning Face Representation from Scratch</a></td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>53%</td><td>436</td><td>233</td><td>203</td><td>32</td><td>284</td><td>115</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>expw</td><td>ExpW</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html">Learning effective human pose estimation from inaccurate annotation</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>40%</td><td>173</td><td>70</td><td>103</td><td>9</td><td>116</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen
PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709.html">Learning to predict human behaviour in crowded scenes</a></td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>20%</td><td>5</td><td>1</td><td>4</td><td>1</td><td>5</td><td>0</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1705.00393.pdf">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>41%</td><td>27</td><td>11</td><td>16</td><td>2</td><td>22</td><td>4</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/dd1d/51c3a59cb71cbfe1433ebeb4d973f7f9ddc1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>22%</td><td>193</td><td>42</td><td>133</td><td>15</td><td>101</td><td>2</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfw_p</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>53%</td><td>521</td><td>274</td><td>247</td><td>40</td><td>321</td><td>144</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk02</td><td>CUHK02</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html">Locally Aligned Feature Transforms across Views</a></td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>24%</td><td>242</td><td>57</td><td>185</td><td>17</td><td>139</td><td>1</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>146</td><td>49</td><td>97</td><td>6</td><td>96</td><td>0</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult
age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>167</td><td>83</td><td>84</td><td>15</td><td>131</td><td>27</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>11</td><td>2</td></tr><tr><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td><td>coco</td><td>COCO</td><td><a href="papers/696ca58d93f6404fea0fc75c62d1d7b378f47628.html">Microsoft COCO Captions: Data Collection and Evaluation Server</a></td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>46%</td><td>283</td><td>129</td><td>154</td><td>16</td><td>231</td><td>4</td></tr><tr><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/a5a44a32a91474f00a3cda671a802e87c899fbb4.html">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>25</td><td>0</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a
href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html">Multi-camera activity correlation analysis</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0163.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>138</td><td>35</td><td>103</td><td>8</td><td>76</td><td>1</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-MotionPairs</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>38%</td><td>125</td><td>48</td><td>77</td><td>6</td><td>72</td><td>1</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>20</td><td>0</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a
href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>25%</td><td>40</td><td>10</td><td>30</td><td>0</td><td>20</td><td>6</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces_news</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html">Names and faces in the news</a></td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.</td><td></td><td></td><td></td><td></td><td>41%</td><td>294</td><td>120</td><td>174</td><td>24</td><td>207</td><td>45</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>24%</td><td>21</td><td>5</td><td>16</td><td>3</td><td>11</td><td>1</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html">Nonparametric Object and Scene Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>999</td><td>305</td><td>694</td><td>93</td><td>670</td><td>9</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html">Parameterisation of a Stochastic Model for Human Face Identification</a></td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>999</td><td>214</td><td>785</td><td>96</td><td>550</td><td>57</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html">Object Detection Combining Recognition and Segmentation</a></td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>23%</td><td>101</td><td>23</td><td>78</td><td>11</td><td>58</td><td>0</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html">On a large sequence-based human gait database</a></td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>148</td><td>54</td><td>94</td><td>16</td><td>98</td><td>0</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>68</td><td>30</td><td>38</td><td>8</td><td>49</td><td>7</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market1203</td><td>Market 1203</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html">Overview of the face recognition grand challenge</a></td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>253</td><td>746</td><td>110</td><td>572</td><td>64</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html">PETS 2017: Dataset and Challenge</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops
(CVPRW)</td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>8</td><td>0</td><td>2</td><td>0</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462">[pdf]</a></td><td>Face and Gesture 2011</td><td></td><td></td><td></td><td></td><td>32%</td><td>184</td><td>58</td><td>126</td><td>23</td><td>112</td><td>23</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><a href="http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>University of Queensland</td><td>-27.49741805</td><td>153.01316956</td><td>30%</td><td>128</td><td>39</td><td>89</td><td>6</td><td>68</td><td>14</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html">Pedestrian Attribute Recognition At Far Distance</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>80</td><td>37</td><td>43</td><td>2</td><td>51</td><td>3</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>10%</td><td>999</td><td>96</td><td>355</td><td>26</td><td>252</td><td>5</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a
href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html">Pedestrian detection: A benchmark</a></td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>17%</td><td>519</td><td>89</td><td>430</td><td>27</td><td>286</td><td>2</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html">Performance Measures and a Data Set for Multi-target, Multi-camera Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>136</td><td>58</td><td>78</td><td>6</td><td>107</td><td>0</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html">Performance Measures and a Data Set for Multi-target, Multi-camera Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>136</td><td>58</td><td>78</td><td>6</td><td>107</td><td>0</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>352</td><td>94</td><td>258</td><td>26</td><td>195</td><td>3</td></tr><tr><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td><td>ilids_vid_reid</td><td>iLIDS-VID</td><td><a 
href="papers/99eb4cea0d9bc9fe777a5c5172f8638a37a7f262.html">Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</a></td><td><a href="https://doi.org/10.1109/LSP.2016.2574323">[pdf]</a></td><td>IEEE Signal Processing Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>4</td><td>0</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>25%</td><td>196</td><td>49</td><td>124</td><td>11</td><td>98</td><td>1</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html">Person Re-identification in the Wild</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Technology Sydney</td><td>-33.88096510</td><td>151.20107299</td><td>38%</td><td>65</td><td>25</td><td>40</td><td>1</td><td>46</td><td>0</td></tr><tr><td>ec792ad2433b6579f2566c932ee414111e194537</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/ec792ad2433b6579f2566c932ee414111e194537.html">Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</a></td><td><a href="https://arxiv.org/pdf/1711.08565.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>50%</td><td>14</td><td>7</td><td>7</td><td>1</td><td>11</td><td>0</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html">Personalizing Human Video Pose Estimation</a></td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>32</td><td>14</td><td>18</td><td>2</td><td>27</td><td>0</td></tr><tr><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td><td>pornodb</td><td>Pornography DB</td><td><a href="papers/b92a1ed9622b8268ae3ac9090e25789fc41cc9b8.html">Pooling in image representation: The visual codeword point of view</a></td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>9%</td><td>77</td><td>7</td><td>70</td><td>7</td><td>43</td><td>2</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>707</td><td>223</td><td>484</td><td>62</td><td>482</td><td>18</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html">Presentation and validation of the Radboud Faces
Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>28%</td><td>446</td><td>127</td><td>319</td><td>43</td><td>307</td><td>19</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html">Pruning training sets for learning of object categories</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>58%</td><td>60</td><td>35</td><td>25</td><td>5</td><td>42</td><td>12</td></tr><tr><td>377f2b65e6a9300448bdccf678cde59449ecd337</td><td>ufdd</td><td>UFDD</td><td><a href="papers/377f2b65e6a9300448bdccf678cde59449ecd337.html">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Johns Hopkins University</td><td>39.32905300</td><td>-76.61942500</td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>48%</td><td>222</td><td>107</td><td>115</td><td>21</td><td>158</td><td>48</td></tr><tr><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td><td>megaage</td><td>MegaAge</td><td><a href="papers/d80a3d1f3a438e02a6685e66ee908446766fefa9.html">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html">Describing Common Human Visual Actions in Images</a></td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>26</td><td>14</td><td>12</td><td>0</td><td>25</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html">Re-identification of pedestrians with variable occlusion and scale</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>10</td><td>0</td><td>10</td><td>2</td><td>6</td><td>0</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a
href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html">Re-identify people in wide area camera network</a></td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>9%</td><td>55</td><td>5</td><td>50</td><td>2</td><td>35</td><td>0</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html">ReSEED: social event dEtection dataset</a></td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>17%</td><td>6</td><td>1</td><td>5</td><td>1</td><td>1</td><td>1</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html">Recognition using visual phrases</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>41%</td><td>233</td><td>95</td><td>138</td><td>18</td><td>174</td><td>5</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html">Recognize complex events from static images by fusing deep channels</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>58%</td><td>45</td><td>26</td><td>19</td><td>1</td><td>30</td><td>12</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html">Recognizing disguised faces</a></td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>24</td><td>6</td><td>18</td><td>0</td><td>16</td><td>1</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>498</td><td>127</td><td>371</td><td>55</td><td>319</td><td>32</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html">Robust Face Landmark Estimation under
Occlusion</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>55%</td><td>305</td><td>167</td><td>138</td><td>16</td><td>186</td><td>95</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><a href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>45.50397610</td><td>-73.57496870</td><td>28%</td><td>18</td><td>5</td><td>6</td><td>0</td><td>7</td><td>4</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>14%</td><td>29</td><td>4</td><td>25</td><td>3</td><td>17</td><td>0</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><a href="https://arxiv.org/pdf/1511.02459.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td>edu</td><td>South China University of Technology</td><td>23.05020420</td><td>113.39880323</td><td>43%</td><td>14</td><td>6</td><td>8</td><td>3</td><td>5</td><td>7</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="https://arxiv.org/pdf/1804.04326.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>32%</td><td>260</td><td>84</td><td>85</td><td>18</td><td>143</td><td>10</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html">Scalable Person Re-identification: A Benchmark</a></td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>38%</td><td>394</td><td>149</td><td>245</td><td>18</td><td>271</td><td>3</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a
href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>109df0e8e5969ddf01e073143e83599228a1163f</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/109df0e8e5969ddf01e073143e83599228a1163f.html">Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</a></td><td><a href="http://dl.acm.org/citation.cfm?id=2337184">[pdf]</a></td><td>2012 39th Annual International Symposium on Computer Architecture (ISCA)</td><td></td><td></td><td></td><td></td><td>25%</td><td>192</td><td>48</td><td>144</td><td>8</td><td>99</td><td>0</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>60%</td><td>48</td><td>29</td><td>19</td><td>2</td><td>45</td><td>2</td></tr><tr><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/f152b6ee251cca940dd853c54e6a7b78fbc6b235.html">Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</a></td><td><a href="http://dl.acm.org/citation.cfm?id=3232665">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html">Spoofing faces using makeup: An investigative study</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>43.61581310</td><td>7.06838000</td><td>20%</td><td>5</td><td>1</td><td>4</td><td>0</td><td>1</td><td>2</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><a
href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Michigan State University</td><td>42.71856800</td><td>-84.47791571</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>5</td><td>0</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>towncenter</td><td>TownCenter</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html">Stable Multi-Target Tracking in Real-Time Surveillance Video (Preprint)</a></td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>310</td><td>64</td><td>246</td><td>24</td><td>177</td><td>4</td></tr><tr><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/c866a2afc871910e3282fd9498dce4ab20f6a332.html">Surveillance Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1804.09691.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku</td><td>PKU</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>1</td><td>0</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html">Texas 3D Face Recognition Database</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ssiai_may10.pdf">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td>18%</td><td>61</td><td>11</td><td>50</td><td>3</td><td>36</td><td>2</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>m2vts</td><td>m2vts</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html">The CMU Face In Action (FIA) Database</a></td><td><a
href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>55</td><td>16</td><td>39</td><td>5</td><td>38</td><td>7</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td>39%</td><td>83</td><td>32</td><td>51</td><td>2</td><td>62</td><td>3</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>CAFE</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>33</td><td>16</td><td>17</td><td>3</td><td>28</td><td>1</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="https://arxiv.org/pdf/1604.01685.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>20</td><td>8</td><td>12</td><td>2</td><td>16</td><td>0</td></tr><tr><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4.html">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="https://arxiv.org/pdf/1805.07193.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><a
href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td></td><td></td><td></td><td></td><td>41%</td><td>975</td><td>403</td><td>572</td><td>65</td><td>460</td><td>345</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>112</td><td>31</td><td>81</td><td>12</td><td>76</td><td>4</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="http://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>12%</td><td>17</td><td>2</td><td>15</td><td>2</td><td>11</td><td>0</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html">The MUG facial expression database</a></td><td><a href="http://ieeexplore.ieee.org/document/5617662/">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>40.62984145</td><td>22.95889350</td><td>28%</td><td>68</td><td>19</td><td>49</td><td>5</td><td>28</td><td>19</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html">The Mapillary Vistas Dataset for Semantic Understanding of 
Street Scenes</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>39%</td><td>44</td><td>17</td><td>27</td><td>0</td><td>36</td><td>0</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>55%</td><td>121</td><td>66</td><td>55</td><td>11</td><td>98</td><td>20</td></tr><tr><td>a6e695ddd07aad719001c0fc1129328452385949</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/a6e695ddd07aad719001c0fc1129328452385949.html">The New Data and New Challenges in Multimedia Research</a></td><td><span class="gray">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>36%</td><td>160</td><td>57</td><td>103</td><td>11</td><td>105</td><td>4</td></tr><tr><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td><td>voc</td><td>VOC</td><td><a href="papers/abe9f3b91fd26fa1b50cd685c0d20debfb372f73.html">The Pascal Visual Object Classes Challenge: A Retrospective</a></td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>999</td><td>315</td><td>684</td><td>75</td><td>698</td><td>6</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>38%</td><td>112</td><td>43</td><td>69</td><td>14</td><td>83</td><td>2</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html">The intrinsic memorability of face photographs.</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Journal of experimental psychology. General</td><td></td><td></td><td></td><td></td><td>36%</td><td>47</td><td>17</td><td>30</td><td>3</td><td>33</td><td>1</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html">Three-dimensional face recognition: an eigensurface approach</a></td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. 
ICIP '04.</td><td></td><td></td><td></td><td></td><td>19%</td><td>36</td><td>7</td><td>29</td><td>4</td><td>25</td><td>1</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>16%</td><td>77</td><td>12</td><td>39</td><td>3</td><td>32</td><td>0</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>15</td><td>3</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1212.0402.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>University of Central Florida</td><td>28.59899755</td><td>-81.19712501</td><td>54%</td><td>999</td><td>535</td><td>464</td><td>73</td><td>708</td><td>212</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html">UMB-DB: A database of partially occluded 3D faces</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>29%</td><td>45</td><td>13</td><td>32</td><td>2</td><td>20</td><td>3</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html">UMDFaces: An annotated face dataset for training deep networks</a></td><td><a href="http://arxiv.org/abs/1611.01484">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td>54%</td><td>35</td><td>19</td><td>16</td><td>5</td><td>28</td><td>6</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html">USED: a large-scale social event detection dataset</a></td><td><a href="http://doi.acm.org/10.1145/2910017.2910624">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>czech_news_agency</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>10%</td><td>10</td><td>1</td><td>9</td><td>0</td><td>4</td><td>2</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a 
href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>24%</td><td>89</td><td>21</td><td>46</td><td>2</td><td>20</td><td>14</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html">Understanding images of groups of people</a></td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>36%</td><td>202</td><td>72</td><td>130</td><td>12</td><td>126</td><td>24</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928', 'linkType': 'ieee'}">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td></td><td></td><td></td><td></td><td>25%</td><td>32</td><td>8</td><td>24</td><td>3</td><td>16</td><td>1</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html">VADANA: A dense dataset for facial image analysis</a></td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>19%</td><td>16</td><td>3</td><td>13</td><td>0</td><td>6</td><td>6</td></tr><tr><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/eb027969f9310e0ae941e2adee2d42cdf07d938c.html">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1710.08092.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>38%</td><td>56</td><td>21</td><td>35</td><td>6</td><td>50</td><td>3</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html">VQA: Visual Question Answering</a></td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>47%</td><td>731</td><td>344</td><td>387</td><td>47</td><td>628</td><td>4</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html">Violent flows: Real-time detection of violent crowd behavior</a></td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition 
Workshops</td><td></td><td></td><td></td><td></td><td>20%</td><td>83</td><td>17</td><td>66</td><td>6</td><td>42</td><td>2</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>18%</td><td>151</td><td>27</td><td>124</td><td>9</td><td>78</td><td>2</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html">Visual Kinship Recognition of Families in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>67%</td><td>3</td><td>2</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html">WIDER FACE: A Face Detection Benchmark</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.06523.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>53%</td><td>148</td><td>78</td><td>70</td><td>16</td><td>107</td><td>34</td></tr><tr><td>77c81c13a110a341c140995bedb98101b9e84f7f</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/77c81c13a110a341c140995bedb98101b9e84f7f.html">WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td></td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html">WLFDB: Weakly Labeled Face Databases</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>77</td><td>34</td><td>43</td><td>4</td><td>57</td><td>1</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html">Web-based database for facial expression analysis</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf">[pdf]</a></td><td>2005 IEEE International Conference on 
Multimedia and Expo</td><td></td><td></td><td></td><td></td><td>32%</td><td>440</td><td>142</td><td>298</td><td>44</td><td>258</td><td>82</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><a href="http://doi.acm.org/10.1145/2996913.2996997">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html">YawDD: a yawning detection dataset</a></td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>14</td><td>0</td><td>14</td><td>1</td><td>2</td><td>1</td></tr></table></body></html>
\ No newline at end of file
+<!doctype html><html><head><meta charset='utf-8'><title>All Papers</title><link rel='stylesheet' href='reports.css'></head><body><h2>All Papers</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/fb82681ac5d3487bd8e52dbb3d1fa220eeac855e.html">1 Network Notebook</a></td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>11</td><td>1</td><td>10</td><td>1</td><td>10</td><td>0</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866', 'linkType': 'ieee'}">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>50%</td><td>356</td><td>179</td><td>177</td><td>21</td><td>299</td><td>3</td></tr><tr><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2f5d44dc3e1b5955942133ff872ebd31716ec604.html">2D and 3D face recognition: A survey</a></td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>15%</td><td>389</td><td>57</td><td>332</td><td>28</td><td>198</td><td>17</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><a href="http://doi.acm.org/10.1145/2072572.2072590">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>122</td><td>22</td><td>100</td><td>11</td><td>71</td><td>1</td></tr><tr><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/9696ad8b164f5e10fcfe23aacf74bd6168aebb15.html">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1712.01443.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>0</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>31%</td><td>39</td><td>12</td><td>27</td><td>2</td><td>27</td><td>7</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><a
href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td>41%</td><td>323</td><td>131</td><td>192</td><td>29</td><td>221</td><td>25</td></tr><tr><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/c34532fe6bfbd1e6df477c9ffdbb043b77e7804d.html">A 3D Morphable Eye Region Model for Gaze Estimation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>67%</td><td>24</td><td>16</td><td>8</td><td>0</td><td>18</td><td>4</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html">A 3D facial expression database for facial behavior research</a></td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>24%</td><td>555</td><td>131</td><td>424</td><td>47</td><td>283</td><td>48</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td>21%</td><td>58</td><td>12</td><td>46</td><td>7</td><td>40</td><td>1</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td>edu</td><td>Jacobs University</td><td>53.41291480</td><td>-2.96897915</td><td>44%</td><td>52</td><td>23</td><td>29</td><td>5</td><td>37</td><td>4</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html">A Multi - camera video data set for research on High - Definition surveillance</a></td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html">A Multi - camera video data set for research on High - Definition surveillance</a></td><td><a 
href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html">A Multi-modal Graphical Model for Scene Analysis</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td>12%</td><td>8</td><td>1</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>0</td><td>18</td><td>0</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr><tr><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/ad62c6e17bc39b4dec20d32f6ac667ae42d2c118.html">A Synchronization Ground Truth for the Jiku Mobile Video Dataset</a></td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html">A Systematic Evaluation and 
Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1605.09653.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE transactions on pattern analysis and machine intelligence</td><td></td><td></td><td></td><td></td><td>27%</td><td>15</td><td>4</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td><td>nudedetection</td><td>Nude Detection</td><td><a href="papers/7ace44190729927e5cb0dd5d363fcae966fe13f7.html">A bag-of-features approach based on Hue-SIFT descriptor for nude detection</a></td><td><a href="http://ieeexplore.ieee.org/document/7077625/">[pdf]</a></td><td>2009 17th European Signal Processing Conference</td><td></td><td></td><td></td><td></td><td>18%</td><td>51</td><td>9</td><td>42</td><td>1</td><td>18</td><td>0</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html">A data-driven approach to cleaning large face datasets</a></td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>46%</td><td>123</td><td>56</td><td>67</td><td>6</td><td>95</td><td>21</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>36%</td><td>151</td><td>54</td><td>97</td><td>7</td><td>85</td><td>26</td></tr><tr><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td><td>bjut_3d</td><td>BJUT-3D</td><td><a href="papers/1ed1a49534ad8dd00f81939449f6389cfbc25321.html">A novel face recognition method based on 3D face model</a></td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202">[pdf]</a></td><td>2007 IEEE International Conference on Robotics and Biomimetics (ROBIO)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>1</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html">Adaptive Image Sampling and Windows Classification for On–board Pedestrian Detection</a></td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>44</td><td>4</td><td>40</td><td>1</td><td>21</td><td>0</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><a 
href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>73</td><td>26</td><td>47</td><td>6</td><td>39</td><td>16</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html">Age and Gender Estimation of Unfiltered Faces</a></td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td></td><td></td><td></td><td></td><td>43%</td><td>168</td><td>72</td><td>96</td><td>7</td><td>89</td><td>53</td></tr><tr><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td><td>agedb</td><td>AgeDB</td><td><a href="papers/6dcf418c778f528b5792104760f1fbfe90c6dd6a.html">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>82%</td><td>11</td><td>9</td><td>2</td><td>0</td><td>10</td><td>0</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>imm_face</td><td>IMM Face Dataset</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>muct</td><td>MUCT</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>22%</td><td>88</td><td>19</td><td>43</td><td>4</td><td>42</td><td>2</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a 
href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>22%</td><td>88</td><td>19</td><td>43</td><td>4</td><td>42</td><td>2</td></tr><tr><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td><td>face_research_lab</td><td>Face Research Lab London</td><td><a href="papers/c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8.html">Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</a></td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td>Scientific reports</td><td>edu</td><td>University College London</td><td>51.52316070</td><td>-0.12820370</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html">Appearance-based gaze estimation in the wild</a></td><td><a href="https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Max Planck Institute for Informatics</td><td>49.25795660</td><td>7.04577417</td><td>38%</td><td>138</td><td>52</td><td>86</td><td>3</td><td>94</td><td>7</td></tr><tr><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td><td>camel</td><td>CAMEL</td><td><a href="papers/5801690199c1917fa58c35c3dead177c0b8f9f2d.html">Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf">[pdf]</a></td><td>Remote Sensing</td><td></td><td></td><td></td><td></td><td>37%</td><td>19</td><td>7</td><td>12</td><td>1</td><td>16</td><td>0</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html">Attribute and simile classifiers for face verification</a></td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>51%</td><td>894</td><td>454</td><td>440</td><td>55</td><td>587</td><td>222</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html">Automated Human Identification Using Ear Imaging</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>13%</td><td>70</td><td>9</td><td>61</td><td>6</td><td>28</td><td>1</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html">Automatic 3D face authentication</a></td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf">[pdf]</a></td><td>Image Vision 
Comput.</td><td></td><td></td><td></td><td></td><td>25%</td><td>95</td><td>24</td><td>71</td><td>8</td><td>60</td><td>2</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="https://doi.org/10.1007/s11263-013-0672-6">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>1</td><td>18</td><td>1</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>60%</td><td>50</td><td>30</td><td>19</td><td>2</td><td>40</td><td>4</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html">Bosphorus Database for 3D Face Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>328</td><td>58</td><td>270</td><td>18</td><td>143</td><td>37</td></tr><tr><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/214c966d1f9c2a4b66f4535d9a0d4078e63a5867.html">Brainwash: A Data System for Feature Engineering</a></td><td><a 
href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>57</td><td>34</td><td>23</td><td>2</td><td>50</td><td>0</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html">ChaLearn looking at people: A review of events and resources</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1701.02664.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td>30%</td><td>10</td><td>3</td><td>7</td><td>1</td><td>6</td><td>0</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>47%</td><td>60</td><td>28</td><td>32</td><td>0</td><td>36</td><td>6</td></tr><tr><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/6dbe8e5121c534339d6e41f8683e85f87e6abf81.html">Clothing Cosegmentation for Shopping Images With Cluttered Background</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>33%</td><td>6</td><td>2</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a 
href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf">[pdf]</a></td><td></td><td>edu</td><td>Kyushu University</td><td>33.59914655</td><td>130.22359848</td><td>36%</td><td>848</td><td>308</td><td>540</td><td>56</td><td>413</td><td>255</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>-35.27769990</td><td>149.11852700</td><td>33%</td><td>175</td><td>58</td><td>66</td><td>7</td><td>54</td><td>49</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>-35.27769990</td><td>149.11852700</td><td>33%</td><td>175</td><td>58</td><td>66</td><td>7</td><td>54</td><td>49</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html">Competitive affective gaming: winning with a smile</a></td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html">Competitive affective gaming: winning with a smile</a></td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html">Component-Based Face Recognition with 3D Morphable Models</a></td><td><a href="http://www.bheisele.com/avbpa2003.pdf">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td>0%</td><td>12</td><td>0</td><td>12</td><td>0</td><td>8</td><td>0</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf">[pdf]</a></td><td></td><td>edu</td><td>Carnegie Mellon 
University</td><td>37.41021930</td><td>-122.05965487</td><td>38%</td><td>999</td><td>380</td><td>619</td><td>75</td><td>555</td><td>252</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html">Consistent Re-identification in a Camera Network</a></td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>45</td><td>12</td><td>33</td><td>7</td><td>34</td><td>1</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390', 'linkType': 'ieee'}">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td>44%</td><td>102</td><td>45</td><td>54</td><td>6</td><td>62</td><td>28</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390', 'linkType': 'ieee'}">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td>44%</td><td>102</td><td>45</td><td>54</td><td>6</td><td>62</td><td>28</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html">Deep Face Recognition</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>621</td><td>156</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html">Deep Learning Face Attributes in the Wild</a></td><td><a href="http://arxiv.org/pdf/1411.7766v2.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong 
Kong</td><td>22.42031295</td><td>114.20788644</td><td>42%</td><td>808</td><td>340</td><td>468</td><td>69</td><td>666</td><td>50</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk03</td><td>CUHK03</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>35%</td><td>512</td><td>180</td><td>332</td><td>29</td><td>323</td><td>4</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html">Depth and Appearance for Mobile Scene Analysis</a></td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>25%</td><td>319</td><td>79</td><td>240</td><td>27</td><td>192</td><td>0</td></tr><tr><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td><td>a_pascal_yahoo</td><td>aPascal</td><td><a href="papers/2e384f057211426ac5922f1b33d2aa8df5d51f57.html">Describing objects by their attributes</a></td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>727</td><td>73</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>berkeley_pose</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html">Describing people: A poselet-based approach to attribute classification</a></td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>43%</td><td>221</td><td>96</td><td>125</td><td>14</td><td>160</td><td>23</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf">[pdf]</a></td><td></td><td>edu</td><td>California Institute of Technology</td><td>34.13710185</td><td>-118.12527487</td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>6</td><td>1</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html">Ear recognition: More than a survey</a></td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td>29%</td><td>24</td><td>7</td><td>17</td><td>0</td><td>11</td><td>0</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw_a</td><td>LFW-a</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td>IEEE Transactions 
on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>42%</td><td>177</td><td>75</td><td>102</td><td>15</td><td>102</td><td>54</td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td>umd_faces</td><td>UMD</td><td><a href="papers/447d8893a4bdc29fa1214e53499ffe67b28a6db5.html">Electronic Transport in Quantum Confined Systems</a></td><td><a href="http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td>umd_faces</td><td>UMD</td><td><a href="papers/447d8893a4bdc29fa1214e53499ffe67b28a6db5.html">Electronic Transport in Quantum Confined Systems</a></td><td><a href="http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>72</td><td>32</td><td>40</td><td>7</td><td>54</td><td>8</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html">End-to-End Deep Learning for Person Search</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf', 'linkType': 's2'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>34%</td><td>41</td><td>14</td><td>27</td><td>2</td><td>27</td><td>0</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>584</td><td>159</td><td>425</td><td>38</td><td>336</td><td>9</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1703.06283.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>8%</td><td>12</td><td>1</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html">Exploring Models and Data for Image Question Answering</a></td><td><a 
href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>191</td><td>83</td><td>108</td><td>12</td><td>163</td><td>1</td></tr><tr><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td><td>gavab_db</td><td>Gavab</td><td><a href="papers/42505464808dfb446f521fc6ff2cfeffd4d68ff1.html">Expression invariant 3D face recognition with a Morphable Model</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376', 'linkType': 'ieee'}">[pdf]</a></td><td>2008 8th IEEE International Conference on Automatic Face & Gesture Recognition</td><td></td><td></td><td></td><td></td><td>29%</td><td>94</td><td>27</td><td>67</td><td>10</td><td>57</td><td>5</td></tr><tr><td>a5acda0e8c0937bfed013e6382da127103e41395</td><td>disfa</td><td>DISFA</td><td><a href="papers/a5acda0e8c0937bfed013e6382da127103e41395.html">Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>38%</td><td>8</td><td>3</td><td>5</td><td>1</td><td>5</td><td>0</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html">FDDB: A Benchmark for Face Detection in Unconstrained Settings</a></td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td><td>sheffield</td><td>Sheffield Face</td><td><a href="papers/3607afdb204de9a5a9300ae98aa4635d9effcda2.html">Face Description with Local Binary Patterns: Application to Face Recognition</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>999</td><td>238</td><td>761</td><td>65</td><td>483</td><td>87</td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/28312c3a47c1be3a67365700744d3d6665b86f22.html">Face Recognition: A Literature Survey1</a></td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>105</td><td>265</td><td>32</td><td>217</td><td>39</td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/28312c3a47c1be3a67365700744d3d6665b86f22.html">Face Recognition: A Literature Survey1</a></td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>105</td><td>265</td><td>32</td><td>217</td><td>39</td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/28312c3a47c1be3a67365700744d3d6665b86f22.html">Face Recognition: A Literature Survey1</a></td><td><a 
href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>105</td><td>265</td><td>32</td><td>217</td><td>39</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html">Face detection, pose estimation, and landmark localization in the wild</a></td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of California, Irvine</td><td>33.64319010</td><td>-117.84016494</td><td>52%</td><td>999</td><td>521</td><td>478</td><td>59</td><td>607</td><td>273</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html">Face recognition in unconstrained videos with matched background similarity</a></td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Open University of Israel</td><td>32.77824165</td><td>34.99565673</td><td>50%</td><td>485</td><td>244</td><td>240</td><td>32</td><td>290</td><td>140</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html">Face swapping: automatically replacing faces in photographs</a></td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td>ACM Trans. Graph.</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html">Face swapping: automatically replacing faces in photographs</a></td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td>ACM Trans. 
<tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html">Faces in Places: compound query retrieval</a></td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>0</td></tr><tr><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td><td>buhmap_db</td><td>BUHMAP-DB</td><td><a href="papers/014b8df0180f33b9fea98f34ae611c6447d761d2.html">Facial feature tracking and expression recognition for sign language</a></td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf">[pdf]</a></td><td>2008 23rd International Symposium on Computer and Information Sciences</td><td></td><td></td><td></td><td></td><td>16%</td><td>25</td><td>4</td><td>21</td><td>1</td><td>10</td><td>2</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html">Fashion Landmark Detection in the Wild</a></td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>39%</td><td>23</td><td>9</td><td>8</td><td>0</td><td>15</td><td>0</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html">Fine-grained evaluation on face detection in the wild</a></td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Chinese Academy of Sciences</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>13</td><td>4</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>51%</td><td>148</td><td>76</td><td>72</td><td>8</td><td>109</td><td>3</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a 
href="papers/774cbb45968607a027ae4729077734db000a1ec5.html">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>47%</td><td>17</td><td>8</td><td>9</td><td>1</td><td>12</td><td>1</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>42%</td><td>999</td><td>422</td><td>577</td><td>80</td><td>538</td><td>295</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. 
<tr><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td><td>kdef</td><td>KDEF</td><td><a href="papers/93884e46c49f7ae1c7c34046fbc28882f2bd6341.html">Gaze fixation and the neural circuitry of face processing in autism</a></td><td><a href="http://doi.org/10.1038/nn1421">[pdf]</a></td><td>Nature Neuroscience</td><td></td><td></td><td></td><td></td><td>31%</td><td>608</td><td>190</td><td>418</td><td>92</td><td>463</td><td>0</td></tr><tr><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td><td>fei</td><td>FEI</td><td><a href="papers/b6b1b0632eb9d4ab1427278f5e5c46f97753c73d.html">Generalização cartográfica automatizada para um banco de dados cadastral</a></td><td><a href="https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><a href="http://doi.acm.org/10.1145/2676440.2676443">[pdf]</a></td><td></td><td>edu</td><td>University of Kentucky</td><td>38.03337420</td><td>-84.50177580</td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html">High Five: Recognising human interactions in TV shows</a></td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>91</td><td>31</td><td>60</td><td>11</td><td>64</td><td>1</td></tr><tr><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd.html">High-resolution comprehensive 3-D dynamic database for facial articulation analysis</a></td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>5</td><td>0</td><td>5</td><td>0</td><td>1</td><td>0</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf">[pdf]</a></td><td></td><td>edu</td><td>Tohoku University</td><td>38.25309450</td><td>140.87365930</td><td>53%</td><td>91</td><td>48</td><td>43</td><td>5</td><td>60</td><td>15</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html">Histograms of oriented gradients for human detection</a></td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf">[pdf]</a></td><td>2005 IEEE Computer Society 
Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>22%</td><td>999</td><td>217</td><td>782</td><td>67</td><td>520</td><td>22</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html">How to Take a Good Selfie?</a></td><td><a href="http://doi.acm.org/10.1145/2733373.2806365">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>67%</td><td>18</td><td>12</td><td>6</td><td>0</td><td>16</td><td>0</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk01</td><td>CUHK01</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>258</td><td>67</td><td>191</td><td>12</td><td>141</td><td>1</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td>33%</td><td>9</td><td>3</td><td>6</td><td>2</td><td>9</td><td>0</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html">IARPA Janus Benchmark-B Face Dataset</a></td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>24%</td><td>25</td><td>6</td><td>19</td><td>6</td><td>21</td><td>3</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td></td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</span></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td>22%</td><td>32</td><td>7</td><td>25</td><td>2</td><td>17</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html">Indian Face Age Database: A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf">[pdf]</a></td>
's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>10</td><td>4</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html">Interactive Facial Feature Localization</a></td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>52%</td><td>339</td><td>177</td><td>162</td><td>27</td><td>208</td><td>100</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html">International Conference on Automatic Face and Gesture Recognition The CMU Pose , Illumination , and Expression ( PIE ) Database</a></td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>742</td><td>330</td><td>412</td><td>61</td><td>410</td><td>232</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html">Investigating Open-World Person Re-identi cation Using a Drone</a></td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>3</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: 
Systems</td><td></td><td></td><td></td><td></td><td>16%</td><td>75</td><td>12</td><td>63</td><td>6</td><td>25</td><td>8</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html">Labeled Faces in the Wild: Updates and New Reporting Procedures</a></td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>41%</td><td>116</td><td>47</td><td>46</td><td>4</td><td>62</td><td>19</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>put_face</td><td>Put Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a 
href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html">Large Age-Gap face verification by feature injection in deep networks</a></td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>2</td></tr><tr><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td><td>scface</td><td>SCface</td><td><a href="papers/f3b84a03985de3890b400b68e2a92c0a00afd9d0.html">Large Variability Surveillance Camera Face Database</a></td><td><span class="gray">[pdf]</a></td><td>2015 Seventh International Conference on Computational Intelligence, Modelling and Simulation (CIMSim)</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html">Large scale unconstrained open set face database</a></td><td><a href="http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of Colorado at Colorado Springs</td><td>38.89646790</td><td>-104.80505940</td><td>60%</td><td>5</td><td>3</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a 
href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>69a68f9cf874c69e2232f47808016c2736b90c35</td><td>celeba_plus</td><td>CelebFaces+</td><td><a href="papers/69a68f9cf874c69e2232f47808016c2736b90c35.html">Learning Deep Representation for Imbalanced Classification</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>33%</td><td>51</td><td>17</td><td>34</td><td>1</td><td>39</td><td>2</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html">Learning Face Representation from Scratch</a></td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>53%</td><td>436</td><td>233</td><td>203</td><td>32</td><td>284</td><td>115</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html">Learning effective human pose estimation from inaccurate annotation</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>40%</td><td>173</td><td>70</td><td>103</td><td>9</td><td>116</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a 
href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709.html">Learning to predict human behaviour in crowded scenes</a></td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>20%</td><td>5</td><td>1</td><td>4</td><td>1</td><td>5</td><td>0</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1705.00393.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>41%</td><td>27</td><td>11</td><td>16</td><td>2</td><td>22</td><td>4</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1705.00393.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>41%</td><td>27</td><td>11</td><td>16</td><td>2</td><td>22</td><td>4</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfw_p</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>53%</td><td>521</td><td>274</td><td>247</td><td>40</td><td>321</td><td>144</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk02</td><td>CUHK02</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html">Locally Aligned Feature Transforms across Views</a></td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>24%</td><td>242</td><td>57</td><td>185</td><td>17</td><td>139</td><td>1</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>146</td><td>49</td><td>97</td><td>6</td><td>96</td><td>0</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal 
adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>167</td><td>83</td><td>84</td><td>15</td><td>131</td><td>27</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>11</td><td>2</td></tr><tr><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td><td>coco</td><td>COCO</td><td><a href="papers/696ca58d93f6404fea0fc75c62d1d7b378f47628.html">Microsoft COCO Captions: Data Collection and Evaluation Server</a></td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>46%</td><td>283</td><td>129</td><td>154</td><td>16</td><td>231</td><td>4</td></tr><tr><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/a5a44a32a91474f00a3cda671a802e87c899fbb4.html">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>25</td><td>0</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a 
href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>38%</td><td>125</td><td>48</td><td>77</td><td>6</td><td>72</td><td>1</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>20</td><td>0</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition 
(CVPR)</td><td></td><td></td><td></td><td></td><td>25%</td><td>40</td><td>10</td><td>30</td><td>0</td><td>20</td><td>6</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces_news</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html">Names and faces in the news</a></td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.</td><td></td><td></td><td></td><td></td><td>41%</td><td>294</td><td>120</td><td>174</td><td>24</td><td>207</td><td>45</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>24%</td><td>21</td><td>5</td><td>16</td><td>3</td><td>11</td><td>1</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html">Nonparametric Object and Scene Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>999</td><td>305</td><td>694</td><td>93</td><td>670</td><td>9</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html">Parameterisation of a Stochastic Model for Human Face Identification</a></td>
<td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>999</td><td>214</td><td>785</td><td>96</td><td>550</td><td>57</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html">Object Detection Combining Recognition and Segmentation</a></td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>23%</td><td>101</td><td>23</td><td>78</td><td>11</td><td>58</td><td>0</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html">On a large sequence-based human gait database</a></td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>148</td><td>54</td><td>94</td><td>16</td><td>98</td><td>0</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>68</td><td>30</td><td>38</td><td>8</td><td>49</td><td>7</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market1203</td><td>Market 1203</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html">Overview of the face recognition grand challenge</a></td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>253</td><td>746</td><td>110</td><td>572</td><td>64</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html">PETS 2017: Dataset and Challenge</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops 
(CVPRW)</td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>8</td><td>0</td><td>2</td><td>0</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462">[pdf]</a></td><td>Face and Gesture 2011</td><td></td><td></td><td></td><td></td><td>32%</td><td>184</td><td>58</td><td>126</td><td>23</td><td>112</td><td>23</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><a href="http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>University of Queensland</td><td>-27.49741805</td><td>153.01316956</td><td>30%</td><td>128</td><td>39</td><td>89</td><td>6</td><td>68</td><td>14</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html">Pedestrian Attribute Recognition At Far Distance</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>80</td><td>37</td><td>43</td><td>2</td><td>51</td><td>3</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>10%</td><td>999</td><td>96</td><td>355</td><td>26</td><td>252</td><td>5</td></tr>
href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>10%</td><td>999</td><td>96</td><td>355</td><td>26</td><td>252</td><td>5</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html">Performance Measures and a Data Set for Multi-target, Multi-camera Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>136</td><td>58</td><td>78</td><td>6</td><td>107</td><td>0</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>352</td><td>94</td><td>258</td><td>26</td><td>195</td><td>3</td></tr><tr><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td><td>ilids_vid_reid</td><td>iLIDS-VID</td><td><a href="papers/99eb4cea0d9bc9fe777a5c5172f8638a37a7f262.html">Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</a></td><td><a href="https://doi.org/10.1109/LSP.2016.2574323">[pdf]</a></td><td>IEEE Signal Processing 
Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>4</td><td>0</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>25%</td><td>196</td><td>49</td><td>124</td><td>11</td><td>98</td><td>1</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html">Person Re-identification in the Wild</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Technology Sydney</td><td>-33.88096510</td><td>151.20107299</td><td>38%</td><td>65</td><td>25</td><td>40</td><td>1</td><td>46</td><td>0</td></tr><tr><td>ec792ad2433b6579f2566c932ee414111e194537</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/ec792ad2433b6579f2566c932ee414111e194537.html">Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</a></td><td><a href="https://arxiv.org/pdf/1711.08565.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>50%</td><td>14</td><td>7</td><td>7</td><td>1</td><td>11</td><td>0</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html">Personalizing Human Video Pose Estimation</a></td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>32</td><td>14</td><td>18</td><td>2</td><td>27</td><td>0</td></tr><tr><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td><td>pornodb</td><td>Pornography DB</td><td><a href="papers/b92a1ed9622b8268ae3ac9090e25789fc41cc9b8.html">Pooling in image representation: The visual codeword point of view</a></td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf">[pdf]</a></td><td>Computer Vision and Image 
Understanding</td><td></td><td></td><td></td><td></td><td>9%</td><td>77</td><td>7</td><td>70</td><td>7</td><td>43</td><td>2</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>707</td><td>223</td><td>484</td><td>62</td><td>482</td><td>18</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html">Presentation and validation of the Radboud Faces Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>28%</td><td>446</td><td>127</td><td>319</td><td>43</td><td>307</td><td>19</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html">Pruning training sets for learning of object categories</a></td><td><a href="http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>58%</td><td>60</td><td>35</td><td>25</td><td>5</td><td>42</td><td>12</td></tr><tr><td>377f2b65e6a9300448bdccf678cde59449ecd337</td><td>ufdd</td><td>UFDD</td><td><a href="papers/377f2b65e6a9300448bdccf678cde59449ecd337.html">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Johns Hopkins University</td><td>39.32905300</td><td>-76.61942500</td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>48%</td><td>222</td><td>107</td><td>115</td><td>21</td><td>158</td><td>48</td></tr><tr><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td><td>megaage</td><td>MegaAge</td><td><a href="papers/d80a3d1f3a438e02a6685e66ee908446766fefa9.html">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html">Describing Common Human Visual Actions in Images</a></td>
Visual Actions in Images</a></td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>26</td><td>14</td><td>12</td><td>0</td><td>25</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html">Re-identification of pedestrians with variable occlusion and scale</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>10</td><td>0</td><td>10</td><td>2</td><td>6</td><td>0</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html">Re-identify people in wide area camera network</a></td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>9%</td><td>55</td><td>5</td><td>50</td><td>2</td><td>35</td><td>0</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html">ReSEED: social event dEtection dataset</a></td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>17%</td><td>6</td><td>1</td><td>5</td><td>1</td><td>1</td><td>1</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html">Recognition using visual phrases</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>41%</td><td>233</td><td>95</td><td>138</td><td>18</td><td>174</td><td>5</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html">Recognize complex events from static images by fusing deep channels</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>58%</td><td>45</td><td>26</td><td>19</td><td>1</td><td>30</td><td>12</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html">Recognizing disguised faces</a></td><td><a 
href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>24</td><td>6</td><td>18</td><td>0</td><td>16</td><td>1</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>498</td><td>127</td><td>371</td><td>55</td><td>319</td><td>32</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html">Robust Face Landmark Estimation under Occlusion</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>55%</td><td>305</td><td>167</td><td>138</td><td>16</td><td>186</td><td>95</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><a href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>45.50397610</td><td>-73.57496870</td><td>28%</td><td>18</td><td>5</td><td>6</td><td>0</td><td>7</td><td>4</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><a href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>45.50397610</td><td>-73.57496870</td><td>28%</td><td>18</td><td>5</td><td>6</td><td>0</td><td>7</td><td>4</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>14%</td><td>29</td><td>4</td><td>25</td><td>3</td><td>17</td><td>0</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.02459.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td>edu</td><td>South China University of Technology</td><td>23.05020420</td><td>113.39880323</td><td>43%</td><td>14</td><td>6</td><td>8</td><td>3</td><td>5</td><td>7</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="{'url': 
'https://arxiv.org/pdf/1804.04326.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>32%</td><td>260</td><td>84</td><td>85</td><td>18</td><td>143</td><td>10</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>32%</td><td>260</td><td>84</td><td>85</td><td>18</td><td>143</td><td>10</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html">Scalable Person Re-identification: A Benchmark</a></td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>38%</td><td>394</td><td>149</td><td>245</td><td>18</td><td>271</td><td>3</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>109df0e8e5969ddf01e073143e83599228a1163f</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/109df0e8e5969ddf01e073143e83599228a1163f.html">Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</a></td><td><a href="http://dl.acm.org/citation.cfm?id=2337184">[pdf]</a></td><td>2012 39th Annual International Symposium on Computer Architecture 
(ISCA)</td><td></td><td></td><td></td><td></td><td>25%</td><td>192</td><td>48</td><td>144</td><td>8</td><td>99</td><td>0</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>60%</td><td>48</td><td>29</td><td>19</td><td>2</td><td>45</td><td>2</td></tr><tr><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/f152b6ee251cca940dd853c54e6a7b78fbc6b235.html">Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</a></td><td><a href="{'url': 'http://dl.acm.org/citation.cfm?id=3232665', 'linkType': 'acm'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html">Spoofing faces using makeup: An investigative study</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>43.61581310</td><td>7.06838000</td><td>20%</td><td>5</td><td>1</td><td>4</td><td>0</td><td>1</td><td>2</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><a href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Michigan State University</td><td>42.71856800</td><td>-84.47791571</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>5</td><td>0</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>towncenter</td><td>TownCenter</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html">Stable Multi-Target Tracking in Real-Time Surveillance Video (Preprint)</a></td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>310</td><td>64</td><td>246</td><td>24</td><td>177</td><td>4</td></tr><tr><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/c866a2afc871910e3282fd9498dce4ab20f6a332.html">Surveillance Face Recognition Challenge</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.09691.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku</td><td>PKU</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html">Swiss-System Based Cascade Ranking for Gait-Based Person 
Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>1</td><td>0</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>m2vts</td><td>m2vts</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html">The CMU Face In Action (FIA) Database</a></td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>55</td><td>16</td><td>39</td><td>5</td><td>38</td><td>7</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td>39%</td><td>83</td><td>32</td><td>51</td><td>2</td><td>62</td><td>3</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>CAFE</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>33</td><td>16</td><td>17</td><td>3</td><td>28</td><td>1</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer 
Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>20</td><td>8</td><td>12</td><td>2</td><td>16</td><td>0</td></tr><tr><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4.html">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1805.07193.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td></td><td></td><td></td><td></td><td>41%</td><td>975</td><td>403</td><td>572</td><td>65</td><td>460</td><td>345</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. 
Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html">The MUG facial expression database</a></td><td><a href="http://ieeexplore.ieee.org/document/5617662/">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>40.62984145</td><td>22.95889350</td><td>28%</td><td>68</td><td>19</td><td>49</td><td>5</td><td>28</td><td>19</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>39%</td><td>44</td><td>17</td><td>27</td><td>0</td><td>36</td><td>0</td></tr><tr><td>a6e695ddd07aad719001c0fc1129328452385949</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/a6e695ddd07aad719001c0fc1129328452385949.html">The New Data and New Challenges in Multimedia Research</a></td><td><span class="gray">[pdf]</span></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>36%</td><td>160</td><td>57</td><td>103</td><td>11</td><td>105</td><td>4</td></tr><tr><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td><td>voc</td><td>VOC</td><td><a href="papers/abe9f3b91fd26fa1b50cd685c0d20debfb372f73.html">The Pascal Visual Object Classes Challenge: A Retrospective</a></td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf">[pdf]</a></td><td>International Journal of Computer 
Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>999</td><td>315</td><td>684</td><td>75</td><td>698</td><td>6</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html">The intrinsic memorability of face photographs.</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Journal of experimental psychology. General</td><td></td><td></td><td></td><td></td><td>36%</td><td>47</td><td>17</td><td>30</td><td>3</td><td>33</td><td>1</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html">Three-dimensional face recognition: an eigensurface approach</a></td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. ICIP '04.</td><td></td><td></td><td></td><td></td><td>19%</td><td>36</td><td>7</td><td>29</td><td>4</td><td>25</td><td>1</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>16%</td><td>77</td><td>12</td><td>39</td><td>3</td><td>32</td><td>0</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>16%</td><td>77</td><td>12</td><td>39</td><td>3</td><td>32</td><td>0</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>15</td><td>3</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1212.0402.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>University of Central Florida</td><td>28.59899755</td><td>-81.19712501</td><td>54%</td><td>999</td><td>535</td><td>464</td><td>73</td><td>708</td><td>212</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html">UMB-DB: A database of partially occluded 3D faces</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV 
Workshops)</td><td></td><td></td><td></td><td></td><td>29%</td><td>45</td><td>13</td><td>32</td><td>2</td><td>20</td><td>3</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html">USED: a large-scale social event detection dataset</a></td><td><a href="http://doi.acm.org/10.1145/2910017.2910624">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>czech_news_agency</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>10%</td><td>10</td><td>1</td><td>9</td><td>0</td><td>4</td><td>2</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>24%</td><td>89</td><td>21</td><td>46</td><td>2</td><td>20</td><td>14</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>24%</td><td>89</td><td>21</td><td>46</td><td>2</td><td>20</td><td>14</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>24%</td><td>89</td><td>21</td><td>46</td><td>2</td><td>20</td><td>14</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html">Understanding images of groups of people</a></td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>36%</td><td>202</td><td>72</td><td>130</td><td>12</td><td>126</td><td>24</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928', 'linkType': 'ieee'}">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td></td><td></td><td></td><td></td><td>25%</td><td>32</td><td>8</td><td>24</td><td>3</td><td>16</td><td>1</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a 
href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html">VADANA: A dense dataset for facial image analysis</a></td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>19%</td><td>16</td><td>3</td><td>13</td><td>0</td><td>6</td><td>6</td></tr><tr><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/eb027969f9310e0ae941e2adee2d42cdf07d938c.html">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1710.08092.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>38%</td><td>56</td><td>21</td><td>35</td><td>6</td><td>50</td><td>3</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html">VQA: Visual Question Answering</a></td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>47%</td><td>731</td><td>344</td><td>387</td><td>47</td><td>628</td><td>4</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html">Violent flows: Real-time detection of violent crowd behavior</a></td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>20%</td><td>83</td><td>17</td><td>66</td><td>6</td><td>42</td><td>2</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>18%</td><td>151</td><td>27</td><td>124</td><td>9</td><td>78</td><td>2</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html">Visual Kinship Recognition of Families in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>67%</td><td>3</td><td>2</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html">WIDER FACE: A Face Detection Benchmark</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.06523.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong 
Kong</td><td>22.42031295</td><td>114.20788644</td><td>53%</td><td>148</td><td>78</td><td>70</td><td>16</td><td>107</td><td>34</td></tr><tr><td>77c81c13a110a341c140995bedb98101b9e84f7f</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/77c81c13a110a341c140995bedb98101b9e84f7f.html">WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td></td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html">WLFDB: Weakly Labeled Face Databases</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>77</td><td>34</td><td>43</td><td>4</td><td>57</td><td>1</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html">Web-based database for facial expression analysis</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td>32%</td><td>440</td><td>142</td><td>298</td><td>44</td><td>258</td><td>82</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><a href="http://doi.acm.org/10.1145/2996913.2996997">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html">YawDD: a yawning detection dataset</a></td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>14</td><td>0</td><td>14</td><td>1</td><td>2</td><td>1</td></tr></table></body></html>
\ No newline at end of file
diff --git a/scraper/s2-citation-report.py b/scraper/s2-citation-report.py
index e0d812d7..b5849329 100644
--- a/scraper/s2-citation-report.py
+++ b/scraper/s2-citation-report.py
@@ -32,16 +32,73 @@ def s2_citation_report():
   print("citations: {}".format(paper_count))
   print("geocoded: {} ({}%)".format(geocode_count, percent(geocode_count, paper_count)))
 
-  # fetch_google_sheet
+  write_master_report('{}/{}'.format(DIR_PUBLIC_CITATIONS, "datasets.csv"), papers)
 
   sts = subprocess.call([
-    "s3cmd", "sync",
+    "s3cmd", "put", "-P", "--recursive",
     DIR_PUBLIC_CITATIONS + '/',
     "s3://megapixels/v1/citations/",
   ])
 
-def write_master_report(fn, title, papers, key):
-  keys, rows = fetch_google_sheet('statistics')
+def write_master_report(fn, papers):
+  # first make a lookup of the keys that have papers
+  paper_key_lookup = {}
+  for paper in papers:
+    if paper['key'] not in paper_key_lookup:
+      paper_key_lookup[paper['key']] = paper
+
+  # then fetch the statistics csv which has things like "year"
+  fields, rows = fetch_google_sheet('statistics')
+  master_papers = []
+  statistics = {}
+
+  def clean(n):
+    if n:
+      return int(n.replace(',','').replace('.','').replace('?','').strip())
+    return None
+
+  for row in rows:
+    key = row[0]
+    if key not in paper_key_lookup:
+      continue
+    paper = paper_key_lookup[key]
+    stats = {}
+    for index, field in enumerate(fields):
+      stats[field] = row[index]
+    report_fn = '../site/content/datasets/{}/index.md'.format(key)
+    has_report = os.path.exists(report_fn)
+    statistics[key] = stats
+    search_result = read_json('./datasets/s2/entries/{}.json'.format(paper['paperId']))
+
+    image_count = stats['images']
+    if type(image_count) is str:
+      if len(image_count):
+        image_count = clean(image_count)
+      else:
+        image_count = None
+    master_papers.append([
+      stats['key'],
+      stats['name'],
+      '/datasets/{}/'.format(key) if has_report else '',
+      image_count,
+      clean(stats['faces_unique']) or None,
+      stats['year_published'],
+      clean(paper['citation_count']) or 0,
+      clean(search_result['citationStats']['numKeyCitations']) or 0,
+      # origin
+    ])
+  master_paper_keys = [
+    'key',
+    'title',
+    'link',
+    'images',
+    'people',
+    'year',
+    'citations',
+    'influenced',
+    # 'origin'
+  ]
+  write_csv(fn, keys=master_paper_keys, rows=master_papers)
 
 def write_papers_report(fn, title, papers, key, reverse=False):
   sorted_papers = []
@@ -275,7 +332,7 @@ def process_paper(row, addresses, success):
       f.write('<script src="../map.js"></script>')
       f.write("</html>")
     # template = env.get_template('paper.html')
-    with open('{}/{}.json'.format(DIR_PUBLIC_CITATIONS, paper.paper_id), 'w') as f:
+    with open('{}/{}.json'.format(DIR_PUBLIC_CITATIONS, paper.key), 'w') as f:
       json.dump({
         'id': paper.paper_id,
        'paper': res,
diff --git a/scraper/s2-search.py b/scraper/s2-search.py
index 9ec20cc9..d9b1beca 100644
--- a/scraper/s2-search.py
+++ b/scraper/s2-search.py
@@ -38,22 +38,24 @@ def fetch_entries(index, refresh):
     if len(clean_title) < 2:
       continue
     dump_fn = './datasets/s2/dumps/{}.json'.format(key)
-    entry_fn = './datasets/s2/entries/{}.json'.format(key)
     result = None
-    if not refresh and os.path.exists(entry_fn):
-      result = read_json(entry_fn)
+    if not refresh and os.path.exists(dump_fn):
+      results = read_json(dump_fn)
     else:
       results = s2.search(clean_title)
       write_json(dump_fn, results)
-      if len(results['results']) == 0:
-        print("- {}".format(title))
-      else:
-        print("+ {}".format(title))
-        result = results['results'][0]
-        write_json(entry_fn, result)
+
+    if len(results['results']) == 0:
+      print("- {}".format(title))
+    else:
+      print("+ {}".format(title))
+      result = results['results'][0]
+
     if result:
       paper_id = result['id']
       paper = fetch_paper(s2, paper_id)
+      entry_fn = './datasets/s2/entries/{}.json'.format(paper_id)
+      write_json(entry_fn, result)
       citation_lookup.append([key, name, title, paper_id])
 
   write_csv("datasets/citation_lookup.csv", keys=['key', 'name', 'title', 'paper_id'], rows=citation_lookup)
diff --git a/scraper/util.py b/scraper/util.py
index c02f018c..47e5a4aa 100644
--- a/scraper/util.py
+++ b/scraper/util.py
@@ -259,7 +259,7 @@ class AddressBook (object):
   def __init__(self):
     entities = {}
     lookup = {}
-    keys, data = fetch_google_sheet()
+    keys, data = fetch_google_sheet('institutions')
     # keys, data = read_csv('reports/pdf_institutions_deduped.csv', keys=True)
     for index, line in enumerate(data):
       if line[0] == line[1] or line[0] not in entities:
