-rw-r--r-- | scraper/reports/report_coverage.html   |  2
-rw-r--r-- | scraper/reports/report_index.html      |  2
-rw-r--r-- | scraper/s2-final-report.py             | 22
-rw-r--r-- | site/datasets/citations/brainwash.json |  2
-rw-r--r-- | site/datasets/final/brainwash.json     |  2
-rw-r--r-- | site/datasets/unknown/brainwash.json   |  2
-rw-r--r-- | site/datasets/verified/brainwash.json  |  2
7 files changed, 19 insertions, 15 deletions
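
Note on the report being regenerated below: the Coverage column in report_coverage.html appears to be the geocoded citations expressed as a rounded percentage of total citations (e.g. 709 of 999 is reported as 71% for AFW). A minimal sketch of that calculation follows; the dataclass, field names, and function name are illustrative assumptions, not the actual structures used by scraper/s2-final-report.py.

    # Sketch only: reproduces the apparent Coverage figure (geocoded / total citations).
    # Names here are hypothetical, chosen to mirror the report's column headers.
    from dataclasses import dataclass

    @dataclass
    class DatasetRow:
        total_citations: int      # "Total Citations" column
        geocoded_citations: int   # "Geocoded Citations" column

    def coverage_percent(row: DatasetRow) -> int:
        """Geocoded citations as a rounded percentage of total citations."""
        if row.total_citations == 0:
            return 0
        return round(100 * row.geocoded_citations / row.total_citations)

    # AFW row from the report: 709 geocoded out of 999 total -> 71
    print(coverage_percent(DatasetRow(total_citations=999, geocoded_citations=709)))
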
diff --git a/scraper/reports/report_coverage.html b/scraper/reports/report_coverage.html index 093dcab3..42046952 100644 --- a/scraper/reports/report_coverage.html +++ b/scraper/reports/report_coverage.html @@ -1 +1 @@ -<!doctype html><html><head><meta charset='utf-8'><title>Coverage</title><link rel='stylesheet' href='reports.css'></head><body><h2>Coverage</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Country</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html" target="_blank">Face detection, pose estimation, and landmark localization in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>999</td><td>709</td><td>290</td><td>35</td><td>576</td><td>422</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html" target="_blank">Deep Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>999</td><td>646</td><td>353</td><td>48</td><td>558</td><td>429</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html" target="_blank">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>999</td><td>643</td><td>356</td><td>56</td><td>628</td><td>362</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html" target="_blank">Labeled Faces in the Wild: A Database forStudying Face Recognition in Unconstrained Environments</a></td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>999</td><td>632</td><td>367</td><td>59</td><td>598</td><td>382</td></tr><tr><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td><td>coco</td><td>COCO</td><td><a href="papers/5e0f8c355a37a5a89351c02f174e7a5ddcb98683.html" target="_blank">Microsoft COCO: Common Objects in Context</a></td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>608</td><td>391</td><td>25</td><td>722</td><td>259</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html" target="_blank">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern 
Recognition - Workshops</td><td>edu</td><td>University of Pittsburgh</td><td>United States</td><td>40.44415295</td><td>-79.96243993</td><td>61%</td><td>999</td><td>607</td><td>392</td><td>57</td><td>470</td><td>518</td></tr><tr><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td><td>voc</td><td>VOC</td><td><a href="papers/0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a.html" target="_blank">The Pascal Visual Object Classes (VOC) Challenge</a></td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>61%</td><td>999</td><td>607</td><td>391</td><td>28</td><td>557</td><td>422</td></tr><tr><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td><td>kitti</td><td>KITTI</td><td><a href="papers/026e3363b7f76b51cc711886597a44d5f1fd1de2.html" target="_blank">Vision meets robotics: The KITTI dataset</a></td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td>I. J. Robotics Res.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>999</td><td>602</td><td>397</td><td>36</td><td>553</td><td>462</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html" target="_blank">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>60%</td><td>999</td><td>599</td><td>400</td><td>69</td><td>527</td><td>466</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html" target="_blank">Attribute and simile classifiers for face verification</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>914</td><td>585</td><td>329</td><td>47</td><td>586</td><td>316</td></tr><tr><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/6d96f946aaabc734af7fe3fc4454cf8547fcd5ed.html" target="_blank">The AR face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>999</td><td>579</td><td>420</td><td>58</td><td>458</td><td>530</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html" target="_blank">80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>999</td><td>574</td><td>425</td><td>89</td><td>644</td><td>337</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html" target="_blank">Histograms of oriented gradients for human detection</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference 
on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>INRIA Rhone-Alps, Montbonnot, France</td><td>France</td><td>45.21788600</td><td>5.80736900</td><td>57%</td><td>999</td><td>573</td><td>426</td><td>41</td><td>419</td><td>509</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html" target="_blank">Overview of the face recognition grand challenge</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>NIST</td><td>United States</td><td>39.14004000</td><td>-77.21850600</td><td>57%</td><td>999</td><td>568</td><td>430</td><td>85</td><td>549</td><td>442</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html" target="_blank">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>999</td><td>561</td><td>438</td><td>66</td><td>498</td><td>462</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html" target="_blank">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>554</td><td>445</td><td>94</td><td>495</td><td>491</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html" target="_blank">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>553</td><td>446</td><td>69</td><td>540</td><td>439</td></tr><tr><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/b62628ac06bbac998a3ab825324a41a11bc3a988.html" target="_blank">XM2VTSDB : The extended M2VTS database</a></td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>864</td><td>539</td><td>325</td><td>37</td><td>493</td><td>404</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html" target="_blank">Deep Learning Face Attributes in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>57%</td><td>919</td><td>526</td><td>392</td><td>61</td><td>694</td><td>201</td></tr><tr><td>dc8b25e35a3acb812beb499844734081722319b4</td><td>feret</td><td>FERET</td><td><a href="papers/dc8b25e35a3acb812beb499844734081722319b4.html" target="_blank">The 
FERET database and evaluation procedure for face-recognition algorithms</a></td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>521</td><td>478</td><td>103</td><td>591</td><td>421</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html" target="_blank">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>899</td><td>508</td><td>391</td><td>51</td><td>431</td><td>451</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html" target="_blank">Parameterisation of a stochastic model for human face identification</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>999</td><td>501</td><td>498</td><td>94</td><td>543</td><td>427</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>449</td><td>310</td><td>49</td><td>404</td><td>345</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>449</td><td>310</td><td>49</td><td>404</td><td>345</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html" target="_blank">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>716</td><td>415</td><td>301</td><td>59</td><td>492</td><td>222</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html" target="_blank">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of California, Santa Cruz</td><td>United States</td><td>36.99158470</td><td>-122.05827710</td><td>66%</td><td>624</td><td>414</td><td>210</td><td>33</td><td>342</td><td>276</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html" target="_blank">DeepReID: Deep Filter Pairing Neural Network for Person 
Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>568</td><td>412</td><td>156</td><td>19</td><td>320</td><td>235</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html" target="_blank">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>632</td><td>366</td><td>264</td><td>44</td><td>358</td><td>264</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html" target="_blank">Scalable Person Re-identification: A Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>77%</td><td>460</td><td>355</td><td>105</td><td>9</td><td>263</td><td>185</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html" target="_blank">Learning Face Representation from Scratch</a></td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese Academy of Sciences</td><td>China</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>476</td><td>339</td><td>137</td><td>19</td><td>290</td><td>182</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html" target="_blank">Face recognition in unconstrained videos with matched background similarity</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Tel Aviv University</td><td>Israel</td><td>32.11198890</td><td>34.80459702</td><td>66%</td><td>509</td><td>338</td><td>170</td><td>23</td><td>294</td><td>216</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and 
people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html" target="_blank">A 3D facial expression database for facial behavior research</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>588</td><td>316</td><td>271</td><td>44</td><td>306</td><td>282</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html" target="_blank">Interactive Facial Feature Localization</a></td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Adobe</td><td>United States</td><td>37.33077030</td><td>-121.89409510</td><td>85%</td><td>352</td><td>298</td><td>54</td><td>8</td><td>212</td><td>146</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html" target="_blank">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>511</td><td>289</td><td>222</td><td>49</td><td>329</td><td>182</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>407</td><td>283</td><td>124</td><td>16</td><td>252</td><td>153</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>407</td><td>283</td><td>124</td><td>16</td><td>252</td><td>153</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html" target="_blank">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>386</td><td>263</td><td>123</td><td>23</td><td>204</td><td>180</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult 
age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>North Carolina University</td><td>United States</td><td>34.22398690</td><td>-77.87013250</td><td>59%</td><td>437</td><td>258</td><td>178</td><td>22</td><td>228</td><td>203</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>North Carolina University</td><td>United States</td><td>34.22398690</td><td>-77.87013250</td><td>59%</td><td>437</td><td>258</td><td>178</td><td>22</td><td>228</td><td>203</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html" target="_blank">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>79%</td><td>323</td><td>255</td><td>68</td><td>15</td><td>208</td><td>120</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html" target="_blank">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>429</td><td>254</td><td>175</td><td>38</td><td>198</td><td>234</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html" target="_blank">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>387</td><td>254</td><td>133</td><td>19</td><td>291</td><td>96</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html" target="_blank">Web-based database for facial expression analysis</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>464</td><td>250</td><td>214</td><td>45</td><td>282</td><td>188</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html" target="_blank">FDDB: A benchmark for face detection in unconstrained settings</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>380</td><td>247</td><td>133</td><td>16</td><td>202</td><td>164</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html" target="_blank">Presentation and validation of the Radboud 
Faces Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>487</td><td>234</td><td>253</td><td>39</td><td>342</td><td>144</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html" target="_blank">Robust Face Landmark Estimation under Occlusion</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>72%</td><td>325</td><td>233</td><td>92</td><td>12</td><td>194</td><td>133</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>369</td><td>227</td><td>141</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>369</td><td>227</td><td>141</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>369</td><td>227</td><td>141</td><td>32</td><td>237</td><td>131</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>oxford_town_centre</td><td>TownCentre</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html" target="_blank">Stable multi-target tracking in real-time surveillance video</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Oxford</td><td>United Kingdom</td><td>51.75345380</td><td>-1.25400997</td><td>68%</td><td>328</td><td>222</td><td>106</td><td>13</td><td>186</td><td>140</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html" target="_blank">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>69%</td><td>318</td><td>218</td><td>100</td><td>27</td><td>211</td><td>107</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a 
href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html" target="_blank">Depth and Appearance for Mobile Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>63%</td><td>324</td><td>203</td><td>121</td><td>26</td><td>193</td><td>127</td></tr><tr><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td><td>mot</td><td>MOT</td><td><a href="papers/5981e6479c3fd4e31644db35d236bfb84ae46514.html" target="_blank">Learning to associate: HybridBoosted multi-target tracker for crowded scene</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of Southern California</td><td>United States</td><td>34.02241490</td><td>-118.28634407</td><td>61%</td><td>326</td><td>200</td><td>125</td><td>22</td><td>190</td><td>137</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html" target="_blank">Bosphorus Database for 3D Face Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>352</td><td>198</td><td>154</td><td>17</td><td>162</td><td>188</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html" target="_blank">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>343</td><td>194</td><td>149</td><td>23</td><td>223</td><td>114</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html" target="_blank">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>280</td><td>194</td><td>86</td><td>9</td><td>139</td><td>137</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>285</td><td>188</td><td>97</td><td>11</td><td>197</td><td>93</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>285</td><td>188</td><td>97</td><td>11</td><td>197</td><td>93</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>60%</td><td>311</td><td>186</td><td>125</td><td>33</td><td>208</td><td>105</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>60%</td><td>311</td><td>186</td><td>125</td><td>33</td><td>208</td><td>105</td></tr><tr><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/010f0f4929e6a6644fb01f0e43820f91d0fad292.html" target="_blank">YFCC100M: the new data in multimedia research</a></td><td><span class="gray">[pdf]</a></td><td>Commun. ACM</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>64%</td><td>274</td><td>176</td><td>98</td><td>23</td><td>172</td><td>100</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html" target="_blank">Locally Aligned Feature Transforms across Views</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>258</td><td>164</td><td>94</td><td>15</td><td>136</td><td>117</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html" target="_blank">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Brown University</td><td>United States</td><td>41.82686820</td><td>-71.40123146</td><td>60%</td><td>264</td><td>159</td><td>105</td><td>27</td><td>206</td><td>56</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html" target="_blank">Age and Gender Estimation of Unfiltered Faces</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>87%</td><td>179</td><td>156</td><td>23</td><td>1</td><td>98</td><td>80</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html" target="_blank">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark 
A</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>237</td><td>156</td><td>81</td><td>14</td><td>159</td><td>76</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html" target="_blank">Generic object recognition with boosting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>53%</td><td>293</td><td>155</td><td>138</td><td>16</td><td>195</td><td>97</td></tr><tr><td>6204776d31359d129a582057c2d788a14f8aadeb</td><td>youtube_celebrities</td><td>YouTube Celebrities</td><td><a href="papers/6204776d31359d129a582057c2d788a14f8aadeb.html" target="_blank">Face tracking and recognition with visual constraints in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Rutgers University</td><td>United States</td><td>40.47913175</td><td>-74.43168868</td><td>57%</td><td>267</td><td>151</td><td>115</td><td>11</td><td>125</td><td>121</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html" target="_blank">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>184</td><td>148</td><td>36</td><td>8</td><td>120</td><td>67</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html" target="_blank">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>225</td><td>144</td><td>81</td><td>17</td><td>146</td><td>77</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td><a 
href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html" target="_blank">Recognition using visual phrases</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United States</td><td>40.11116745</td><td>-88.22587665</td><td>58%</td><td>246</td><td>143</td><td>103</td><td>17</td><td>170</td><td>68</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html" target="_blank">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>78%</td><td>180</td><td>141</td><td>39</td><td>8</td><td>120</td><td>59</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>bpad</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html" target="_blank">Describing people: A poselet-based approach to attribute classification</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>230</td><td>140</td><td>90</td><td>14</td><td>163</td><td>66</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html" target="_blank">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>197</td><td>132</td><td>65</td><td>15</td><td>108</td><td>88</td></tr><tr><td>0c91808994a250d7be332400a534a9291ca3b60e</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/0c91808994a250d7be332400a534a9291ca3b60e.html" target="_blank">Weak Hypotheses and Boosting for Generic Object Detection and Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>236</td><td>131</td><td>105</td><td>17</td><td>161</td><td>77</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a 
href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html" target="_blank">Exploring Models and Data for Image Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>206</td><td>126</td><td>80</td><td>11</td><td>162</td><td>39</td></tr><tr><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/570f37ed63142312e6ccdf00ecc376341ec72b9f.html" target="_blank">Social LSTM: Human Trajectory Prediction in Crowded Spaces</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>224</td><td>125</td><td>99</td><td>3</td><td>140</td><td>81</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html" target="_blank">Learning effective human pose estimation from inaccurate annotation</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Leeds</td><td>United Kingdom</td><td>53.80387185</td><td>-1.55245712</td><td>71%</td><td>169</td><td>120</td><td>49</td><td>7</td><td>108</td><td>65</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html" target="_blank">WIDER FACE: A Face Detection Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>66%</td><td>178</td><td>118</td><td>60</td><td>11</td><td>112</td><td>66</td></tr><tr><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/22ad2c8c0f4d6aa4328b38d894b814ec22579761.html" target="_blank">Clothing cosegmentation for recognizing people</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>65%</td><td>178</td><td>116</td><td>62</td><td>7</td><td>100</td><td>86</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html" target="_blank">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><span class="gray">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>Australia</td><td>-35.27769990</td><td>149.11852700</td><td>64%</td><td>181</td><td>115</td><td>66</td><td>8</td><td>87</td><td>97</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html" target="_blank">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>168</td><td>115</td><td>53</td><td>4</td><td>97</td><td>69</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a 
href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html" target="_blank">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>148</td><td>113</td><td>35</td><td>3</td><td>80</td><td>65</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html" target="_blank">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>176</td><td>112</td><td>64</td><td>2</td><td>113</td><td>62</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw</td><td>LFW</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html" target="_blank">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>183</td><td>109</td><td>74</td><td>13</td><td>103</td><td>77</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html" target="_blank">Appearance-based gaze estimation in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>149</td><td>109</td><td>40</td><td>3</td><td>94</td><td>54</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html" target="_blank">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>139</td><td>106</td><td>33</td><td>5</td><td>100</td><td>37</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB-Wiki</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html" target="_blank">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>72%</td><td>145</td><td>105</td><td>40</td><td>9</td><td>93</td><td>51</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html" target="_blank">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><span class="gray">[pdf]</a></td><td>Face and Gesture 2011</td><td>edu</td><td>Carnegie Mellon University</td><td>United 
States</td><td>40.44416190</td><td>-79.94272826</td><td>54%</td><td>189</td><td>103</td><td>86</td><td>21</td><td>108</td><td>78</td></tr><tr><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td><td>scface</td><td>SCface</td><td><a href="papers/29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d.html" target="_blank">SCface – surveillance cameras face database</a></td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td>Multimedia Tools and Applications</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>179</td><td>101</td><td>78</td><td>15</td><td>88</td><td>89</td></tr><tr><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td><td>disfa</td><td>DISFA</td><td><a href="papers/5a5f0287484f0d480fed1ce585dbf729586f0edc.html" target="_blank">DISFA: A Spontaneous Facial Action Intensity Database</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Affective Computing</td><td>edu</td><td>University of Denver</td><td>United States</td><td>39.67665410</td><td>-104.96220300</td><td>54%</td><td>184</td><td>100</td><td>84</td><td>17</td><td>96</td><td>89</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html" target="_blank">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>148</td><td>99</td><td>49</td><td>7</td><td>105</td><td>43</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html" target="_blank">Multi-camera activity correlation analysis</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>69%</td><td>142</td><td>98</td><td>44</td><td>7</td><td>77</td><td>64</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB-Wiki</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html" target="_blank">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>79%</td><td>122</td><td>96</td><td>26</td><td>4</td><td>75</td><td>48</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html" target="_blank">On a Large Sequence-Based Human Gait Database</a></td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>150</td><td>95</td><td>55</td><td>17</td><td>103</td><td>51</td></tr><tr><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td><td>fei</td><td>FEI</td><td><a href="papers/8b56e33f33e582f3e473dba573a16b598ed9bcdc.html" target="_blank">A new ranking method for principal components analysis and its application to face image analysis</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision 
Comput.</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>169</td><td>93</td><td>76</td><td>6</td><td>69</td><td>102</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html" target="_blank">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td>edu</td><td>Imperial College London</td><td>United Kingdom</td><td>51.49887085</td><td>-0.17560797</td><td>71%</td><td>129</td><td>92</td><td>37</td><td>6</td><td>74</td><td>55</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html" target="_blank">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>168</td><td>91</td><td>77</td><td>10</td><td>85</td><td>79</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html" target="_blank">A data-driven approach to cleaning large face datasets</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>138</td><td>89</td><td>49</td><td>1</td><td>95</td><td>41</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html" target="_blank">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Massachusetts</td><td>United States</td><td>42.38897850</td><td>-72.52869870</td><td>69%</td><td>123</td><td>85</td><td>38</td><td>3</td><td>71</td><td>51</td></tr><tr><td>5a4df9bef1872865f0b619ac3aacc97f49e4a035</td><td>cuhk_train_station</td><td>CUHK Train Station Dataset</td><td><a href="papers/5a4df9bef1872865f0b619ac3aacc97f49e4a035.html" target="_blank">Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>60%</td><td>141</td><td>84</td><td>57</td><td>4</td><td>60</td><td>75</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html" target="_blank">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>138</td><td>83</td><td>55</td><td>6</td><td>76</td><td>63</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html" target="_blank">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>133</td><td>82</td><td>51</td><td>9</td><td>73</td><td>58</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html" target="_blank">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>United States</td><td>42.08779975</td><td>-75.97066066</td><td>52%</td><td>154</td><td>80</td><td>74</td><td>6</td><td>80</td><td>75</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>108</td><td>77</td><td>31</td><td>7</td><td>66</td><td>44</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>108</td><td>77</td><td>31</td><td>7</td><td>66</td><td>44</td></tr><tr><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/7f23a4bb0c777dd72cca7665a5f370ac7980217e.html" target="_blank">Improving Person Re-identification by Attribute and Identity Learning</a></td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>84%</td><td>87</td><td>73</td><td>14</td><td>0</td><td>43</td><td>42</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html" target="_blank">Labeled Faces in the Wild: A Survey</a></td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Stevens Institute of Technology</td><td>United States</td><td>40.74225200</td><td>-74.02709490</td><td>64%</td><td>109</td><td>70</td><td>39</td><td>7</td><td>66</td><td>43</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html" target="_blank">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>116</td><td>70</td><td>46</td><td>14</td><td>84</td><td>31</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html" target="_blank">Pedestrian Attribute Recognition At Far Distance</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>88</td><td>66</td><td>22</td><td>1</td><td>50</td><td>36</td></tr><tr><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/70c59dc3470ae867016f6ab0e008ac8ba03774a1.html" target="_blank">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>83</td><td>66</td><td>17</td><td>3</td><td>61</td><td>20</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html" target="_blank">Object Detection Combining Recognition and Segmentation</a></td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>105</td><td>64</td><td>41</td><td>9</td><td>58</td><td>43</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html" target="_blank">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>95</td><td>63</td><td>32</td><td>6</td><td>50</td><td>45</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html" target="_blank">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>95</td><td>61</td><td>34</td><td>4</td><td>59</td><td>35</td></tr><tr><td>06f02199690961ba52997cde1527e714d2b3bf8f</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/06f02199690961ba52997cde1527e714d2b3bf8f.html" target="_blank">Gaze locking: passive eye contact detection for human-object interaction</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>76%</td><td>79</td><td>60</td><td>19</td><td>0</td><td>49</td><td>34</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html" target="_blank">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>99</td><td>59</td><td>40</td><td>1</td><td>73</td><td>21</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html" target="_blank">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a 
href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>City University of New York</td><td>United States</td><td>40.87228250</td><td>-73.89489171</td><td>51%</td><td>115</td><td>59</td><td>56</td><td>8</td><td>75</td><td>37</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html" target="_blank">Understanding Kin Relationships in a Photo</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>94</td><td>59</td><td>35</td><td>1</td><td>33</td><td>61</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html" target="_blank">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>91</td><td>57</td><td>34</td><td>5</td><td>60</td><td>31</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html" target="_blank">Violent flows: Real-time detection of violent crowd behavior</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>65%</td><td>88</td><td>57</td><td>31</td><td>6</td><td>45</td><td>44</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html" target="_blank">High Five: Recognising human interactions in TV shows</a></td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>98</td><td>56</td><td>42</td><td>10</td><td>66</td><td>28</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html" target="_blank">Automatic 3D face authentication</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>100</td><td>54</td><td>46</td><td>8</td><td>63</td><td>36</td></tr><tr><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td><td>put_face</td><td>Put Face</td><td><a href="papers/ae0aee03d946efffdc7af2362a42d3750e7dd48a.html" target="_blank">The put face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>99</td><td>54</td><td>45</td><td>5</td><td>55</td><td>48</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html" target="_blank">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td>International 
Journal of Computer Vision</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>63%</td><td>84</td><td>53</td><td>31</td><td>4</td><td>51</td><td>33</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html" target="_blank">Person Re-identification in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>77</td><td>52</td><td>25</td><td>1</td><td>47</td><td>27</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html" target="_blank">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>78</td><td>51</td><td>27</td><td>5</td><td>54</td><td>23</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html" target="_blank">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td>edu</td><td>University of North Carolina at Chapel Hill</td><td>United States</td><td>35.91139710</td><td>-79.05045290</td><td>61%</td><td>82</td><td>50</td><td>32</td><td>6</td><td>28</td><td>52</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html" target="_blank">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>91%</td><td>54</td><td>49</td><td>4</td><td>1</td><td>41</td><td>12</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html" target="_blank">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>86</td><td>45</td><td>41</td><td>7</td><td>54</td><td>29</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html" target="_blank">The MUG facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>Greece</td><td>40.62984145</td><td>22.95889350</td><td>55%</td><td>82</td><td>45</td><td>37</td><td>4</td><td>34</td><td>47</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html" 
target="_blank">Pruning training sets for learning of object categories</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>63</td><td>44</td><td>19</td><td>4</td><td>42</td><td>20</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html" target="_blank">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>78</td><td>43</td><td>35</td><td>8</td><td>44</td><td>31</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html" target="_blank">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>60</td><td>42</td><td>18</td><td>0</td><td>34</td><td>28</td></tr><tr><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/1bd1645a629f1b612960ab9bba276afd4cf7c666.html" target="_blank">End-to-End People Detection in Crowded Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Stanford University</td><td>United States</td><td>37.43131385</td><td>-122.16936535</td><td>61%</td><td>67</td><td>41</td><td>25</td><td>3</td><td>42</td><td>23</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html" target="_blank">Kinship Verification through Transfer Learning</a></td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>71</td><td>41</td><td>30</td><td>2</td><td>29</td><td>42</td></tr><tr><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td><td>pa_100k</td><td>PA-100K</td><td><a href="papers/f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44.html" target="_blank">HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>55</td><td>41</td><td>14</td><td>0</td><td>36</td><td>17</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html" target="_blank">Automated Human Identification Using Ear Imaging</a></td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>80</td><td>40</td><td>40</td><td>6</td><td>35</td><td>44</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html" target="_blank">Texas 3D Face Recognition Database</a></td><td><span 
class="gray">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>66</td><td>40</td><td>26</td><td>3</td><td>40</td><td>27</td></tr><tr><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td><td>feret</td><td>FERET</td><td><a href="papers/31de9b3dd6106ce6eec9a35991b2b9083395fd0b.html" target="_blank">FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</a></td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>75</td><td>39</td><td>36</td><td>5</td><td>54</td><td>20</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html" target="_blank">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>83</td><td>38</td><td>45</td><td>6</td><td>43</td><td>39</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html" target="_blank">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><span class="gray">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>65</td><td>38</td><td>27</td><td>6</td><td>45</td><td>20</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html" target="_blank">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>61</td><td>37</td><td>24</td><td>0</td><td>43</td><td>16</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html" target="_blank">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>67</td><td>36</td><td>31</td><td>4</td><td>29</td><td>28</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html" target="_blank">Re-identify people in wide area camera network</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of 
Udine</td><td>Italy</td><td>46.08107230</td><td>13.21194740</td><td>60%</td><td>60</td><td>36</td><td>24</td><td>1</td><td>38</td><td>21</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html" target="_blank">Consistent Re-identification in a Camera Network</a></td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>49</td><td>35</td><td>14</td><td>3</td><td>34</td><td>13</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html" target="_blank">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>52</td><td>34</td><td>18</td><td>1</td><td>46</td><td>6</td></tr><tr><td>8be57cdad86fdf8c8290df4ca3149592f3c46dd3</td><td>m2vts</td><td>m2vts</td><td><a href="papers/8be57cdad86fdf8c8290df4ca3149592f3c46dd3.html" target="_blank">The M2VTS Multimodal Face Database (Release 1.00)</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>73</td><td>33</td><td>40</td><td>2</td><td>39</td><td>33</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html" target="_blank">UMDFaces: An annotated face dataset for training deep networks</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td>edu</td><td>University of Maryland</td><td>United States</td><td>39.28996850</td><td>-76.62196103</td><td>79%</td><td>42</td><td>33</td><td>9</td><td>2</td><td>30</td><td>11</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia 
University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html" target="_blank">End-to-End Deep Learning for Person Search</a></td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>46</td><td>32</td><td>14</td><td>0</td><td>27</td><td>16</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/18858cc936947fc96b5c06bbe3c6c2faa5614540.html" target="_blank">Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</a></td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>59</td><td>31</td><td>28</td><td>0</td><td>47</td><td>10</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html" target="_blank">UMB-DB: A database of partially occluded 3D faces</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>47</td><td>31</td><td>16</td><td>2</td><td>22</td><td>24</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html" target="_blank">Recognize complex events from static images by 
fusing deep channels</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>44</td><td>31</td><td>13</td><td>1</td><td>29</td><td>15</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html" target="_blank">The intrinsic memorability of face photographs.</a></td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td>Journal of experimental psychology. General</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>52</td><td>29</td><td>23</td><td>2</td><td>36</td><td>14</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html" target="_blank">Level Playing Field for Million Scale Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>United States</td><td>47.65432380</td><td>-122.30800894</td><td>72%</td><td>39</td><td>28</td><td>11</td><td>2</td><td>29</td><td>9</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html" target="_blank">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>50</td><td>26</td><td>24</td><td>5</td><td>31</td><td>18</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html" target="_blank">The CMU Face In Action (FIA) Database</a></td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>54</td><td>26</td><td>28</td><td>5</td><td>40</td><td>16</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html" target="_blank">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>0</td><td>18</td><td>16</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html" target="_blank">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>2</td><td>21</td><td>12</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html" target="_blank">Adaptive Image Sampling and Windows Classification for On-board Pedestrian 
Detection</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>47</td><td>24</td><td>23</td><td>1</td><td>23</td><td>24</td></tr><tr><td>8875ae233bc074f5cd6c4ebba447b536a7e847a5</td><td>voxceleb2</td><td>VoxCeleb2</td><td><a href="papers/8875ae233bc074f5cd6c4ebba447b536a7e847a5.html" target="_blank">VoxCeleb2: Deep Speaker Recognition.</a></td><td><a href="https://pdfs.semanticscholar.org/8875/ae233bc074f5cd6c4ebba447b536a7e847a5.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>34</td><td>24</td><td>9</td><td>2</td><td>31</td><td>2</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/758d7e1be64cc668c59ef33ba8882c8597406e53.html" target="_blank">AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</a></td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>37</td><td>23</td><td>14</td><td>0</td><td>25</td><td>11</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html" target="_blank">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>2</td><td>26</td><td>15</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html" target="_blank">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>0</td><td>17</td><td>26</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html" target="_blank">Personalizing Human Video Pose Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Oxford University</td><td>United Kingdom</td><td>51.75208490</td><td>-1.25166460</td><td>64%</td><td>36</td><td>23</td><td>13</td><td>2</td><td>30</td><td>8</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html" target="_blank">IARPA Janus Benchmark-B Face Dataset</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Michigan State University</td><td>United States</td><td>42.71856800</td><td>-84.47791571</td><td>63%</td><td>35</td><td>22</td><td>13</td><td>3</td><td>25</td><td>8</td></tr><tr><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/41976ebc8ab76d9a6861487c97cc7fcbe3b6015f.html" target="_blank">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Columbia University</td><td>United 
States</td><td>40.84198360</td><td>-73.94368971</td><td>76%</td><td>29</td><td>22</td><td>7</td><td>2</td><td>27</td><td>2</td></tr><tr><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/a0cc5f73a37723a6dd465924143f1cb4976d0169.html" target="_blank">Person Transfer GAN to Bridge Domain Gap for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>92%</td><td>24</td><td>22</td><td>2</td><td>1</td><td>20</td><td>4</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html" target="_blank">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td>edu</td><td>University of Notre Dame</td><td>United States</td><td>41.70456775</td><td>-86.23822026</td><td>63%</td><td>35</td><td>22</td><td>13</td><td>3</td><td>18</td><td>15</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html" target="_blank">Ear Recognition: More Than a Survey</a></td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" target="_blank">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td></td><td>77%</td><td>26</td><td>20</td><td>6</td><td>0</td><td>10</td><td>16</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>#N/A</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html" target="_blank">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>37</td><td>20</td><td>17</td><td>3</td><td>30</td><td>7</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>33</td><td>20</td><td>13</td><td>1</td><td>23</td><td>11</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>33</td><td>20</td><td>13</td><td>1</td><td>23</td><td>11</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html" target="_blank">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>35</td><td>20</td><td>15</td><td>2</td><td>21</td><td>14</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html" target="_blank">Fashion Landmark Detection in the Wild</a></td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>26</td><td>19</td><td>7</td><td>1</td><td>16</td><td>10</td></tr><tr><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/64e0690dd176a93de9d4328f6e31fc4afe1e7536.html" target="_blank">Tracking Multiple People Online and in Real Time</a></td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>23</td><td>18</td><td>5</td><td>1</td><td>12</td><td>10</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html" target="_blank">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>26</td><td>18</td><td>8</td><td>0</td><td>16</td><td>10</td></tr><tr><td>d818568838433a6d6831adde49a58cef05e0c89f</td><td>agedb</td><td>AgeDB</td><td><a href="papers/d818568838433a6d6831adde49a58cef05e0c89f.html" target="_blank">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Imperial College London</td><td>United Kingdom</td><td>51.49887085</td><td>-0.17560797</td><td>94%</td><td>18</td><td>17</td><td>1</td><td>0</td><td>14</td><td>3</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html" target="_blank">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td>International 
Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>1</td><td>16</td><td>11</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html" target="_blank">Describing Common Human Visual Actions in Images</a></td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>25</td><td>17</td><td>8</td><td>0</td><td>23</td><td>2</td></tr><tr><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/d178cde92ab3dc0dd2ebee5a76a33d556c39448b.html" target="_blank">The jiku mobile video dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>National University of Singapore</td><td>Singapore</td><td>1.29620180</td><td>103.77689944</td><td>71%</td><td>24</td><td>17</td><td>7</td><td>0</td><td>6</td><td>19</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html" target="_blank">Genealogical face recognition based on UB KinFace database</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>United States</td><td>42.93362780</td><td>-78.88394479</td><td>55%</td><td>31</td><td>17</td><td>14</td><td>0</td><td>11</td><td>21</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html" target="_blank">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>26</td><td>16</td><td>10</td><td>2</td><td>21</td><td>6</td></tr><tr><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td><td>umd_faces</td><td>UMD</td><td><a href="papers/71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6.html" target="_blank">The Do’s and Don’ts for CNN-Based Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision Workshops 
(ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>26</td><td>16</td><td>10</td><td>2</td><td>16</td><td>8</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html" target="_blank">Three-dimensional face recognition: an eigensurface approach</a></td><td><span class="gray">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. ICIP '04.</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>38</td><td>16</td><td>22</td><td>4</td><td>24</td><td>13</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>erce</td><td>ERCe</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html" target="_blank">Learning Social Relation Traits from Face Images</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>23</td><td>14</td><td>9</td><td>4</td><td>16</td><td>7</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>fpoq</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html" target="_blank">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>81%</td><td>16</td><td>13</td><td>3</td><td>0</td><td>13</td><td>4</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html" target="_blank">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>18</td><td>13</td><td>5</td><td>0</td><td>14</td><td>4</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html" target="_blank">Fine-grained evaluation on face detection in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition 
(FG)</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>12</td><td>5</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html" target="_blank">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>1</td><td>12</td><td>6</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html" target="_blank">YawDD: a yawning detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>15</td><td>12</td><td>3</td><td>1</td><td>2</td><td>13</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html" target="_blank">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>2</td><td>18</td><td>3</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html" target="_blank">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>22</td><td>11</td><td>11</td><td>3</td><td>11</td><td>10</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html" target="_blank">Iranian Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</a></td><td>2007 International Conference on Machine Vision</td><td>edu</td><td>Islamic Azad University</td><td>Iran</td><td>34.84529990</td><td>48.55962120</td><td>48%</td><td>23</td><td>11</td><td>12</td><td>2</td><td>14</td><td>9</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html" target="_blank">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><span class="gray">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td></td><td>79%</td><td>14</td><td>11</td><td>3</td><td>0</td><td>12</td><td>1</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html" target="_blank">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td>edu</td><td>BVBCET, Hubli, 
India</td><td>India</td><td>15.36883320</td><td>75.12137960</td><td>65%</td><td>17</td><td>11</td><td>6</td><td>0</td><td>11</td><td>5</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html" target="_blank">Recognizing disguised faces</a></td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>18</td><td>10</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html" target="_blank">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>3</td><td>18</td><td>9</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html" target="_blank">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>United States</td><td>34.22498270</td><td>-77.86907744</td><td>77%</td><td>13</td><td>10</td><td>3</td><td>0</td><td>6</td><td>8</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html" target="_blank">VADANA: A dense dataset for facial image analysis</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>University of Delaware</td><td>United States</td><td>39.68103280</td><td>-75.75401840</td><td>67%</td><td>15</td><td>10</td><td>5</td><td>0</td><td>5</td><td>10</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html" target="_blank">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>18</td><td>9</td><td>9</td><td>0</td><td>12</td><td>5</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html" target="_blank">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>20</td><td>9</td><td>11</td><td>0</td><td>9</td><td>11</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html" target="_blank">ChaLearn looking at people: A review of events and resources</a></td><td><span class="gray">[pdf]</a></td><td>2017 International 
Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>13</td><td>9</td><td>4</td><td>1</td><td>8</td><td>4</td></tr><tr><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/a8d0b149c2eadaa02204d3e4356fbc8eccf3b315.html" target="_blank">Hi4D-ADSIP 3-D dynamic facial articulation database</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>1</td><td>4</td><td>11</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html" target="_blank">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>19</td><td>9</td><td>10</td><td>2</td><td>6</td><td>13</td></tr><tr><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td><td>caltech_crp</td><td>Caltech CRP</td><td><a href="papers/060820f110a72cbf02c14a6d1085bd6e1d994f6a.html" target="_blank">Fine-grained classification of pedestrians in video: Benchmark and state of the art</a></td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>17</td><td>8</td><td>9</td><td>0</td><td>9</td><td>8</td></tr><tr><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2b926b3586399d028b46315d7d9fb9d879e4f79c.html" target="_blank">Multimodal 2D, 2.5D & 3D Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2006 International Conference on Image Processing</td><td>edu</td><td>Universidad Rey Juan Carlos, Spain</td><td>Spain</td><td>40.33586610</td><td>-3.87694320</td><td>57%</td><td>14</td><td>8</td><td>6</td><td>0</td><td>2</td><td>12</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html" target="_blank">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>16</td><td>8</td><td>8</td><td>1</td><td>10</td><td>6</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html" target="_blank">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><span class="gray">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>Canada</td><td>45.50397610</td><td>-73.57496870</td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>13</td><td>7</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html" target="_blank">How to Take a Good Selfie?</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>11</td><td>8</td><td>3</td><td>0</td><td>7</td><td>5</td></tr><tr><td>633c851ebf625ad7abdda2324e9de093cf623141</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/633c851ebf625ad7abdda2324e9de093cf623141.html" target="_blank">Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</a></td><td><span class="gray">[pdf]</a></td><td>2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>10</td><td>7</td><td>3</td><td>0</td><td>8</td><td>3</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html" target="_blank">Competitive affective gaming: winning with a smile</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>Portugal</td><td>38.66096400</td><td>-9.20581300</td><td>78%</td><td>9</td><td>7</td><td>2</td><td>0</td><td>5</td><td>4</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>ufi</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html" target="_blank">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>12</td><td>6</td><td>6</td><td>0</td><td>4</td><td>6</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html" target="_blank">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>11</td><td>6</td><td>5</td><td>0</td><td>5</td><td>4</td></tr><tr><td>4af89578ac237278be310f7660a408b03f12d603</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/4af89578ac237278be310f7660a408b03f12d603.html" target="_blank">Large-scale geo-facial image analysis</a></td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" 
target="_blank">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>6</td><td>6</td><td>0</td><td>0</td><td>4</td><td>2</td></tr><tr><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/2d45cfd838016a6e39f6b766ffe85acd649440c7.html" target="_blank">Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>8</td><td>6</td><td>2</td><td>0</td><td>5</td><td>3</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html" target="_blank">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>86%</td><td>7</td><td>6</td><td>1</td><td>1</td><td>5</td><td>2</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html" target="_blank">USED: a large-scale social event detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Trento</td><td>Italy</td><td>46.06588360</td><td>11.11598940</td><td>86%</td><td>7</td><td>6</td><td>1</td><td>0</td><td>3</td><td>4</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html" target="_blank">Large age-gap face verification by feature injection in deep networks</a></td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>7</td><td>5</td><td>2</td><td>0</td><td>3</td><td>4</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html" target="_blank">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>14</td><td>5</td><td>9</td><td>0</td><td>12</td><td>1</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html" target="_blank">Large scale unconstrained open set face database</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>company</td><td>Securics Inc., Colorado Springs, CO</td><td>United States</td><td>38.83388160</td><td>-104.82136340</td><td>83%</td><td>6</td><td>5</td><td>1</td><td>0</td><td>4</td><td>2</td></tr><tr><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td><td>uccs</td><td>UCCS</td><td><a href="papers/d4f1eb008eb80595bcfdac368e23ae9754e1e745.html" target="_blank">Unconstrained Face Detection and Open-Set Face Recognition Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE 
International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>4</td><td>1</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html" target="_blank">Re-identification of pedestrians with variable occlusion and scale</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>Kingston University</td><td>United Kingdom</td><td>51.42930860</td><td>-0.26840440</td><td>56%</td><td>9</td><td>5</td><td>4</td><td>1</td><td>5</td><td>4</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html" target="_blank">Faces in Places: compound query retrieval</a></td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>3</td><td>2</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html" target="_blank">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>6</td><td>3</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html" target="_blank">A Multi-modal Graphical Model for Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>0</td><td>5</td><td>3</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>erce</td><td>ERCe</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html" target="_blank">Visual Kinship Recognition of Families in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>University of Massachusetts Dartmouth</td><td>United States</td><td>41.62772475</td><td>-71.00724501</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>2</td><td>3</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html" target="_blank">Is the eye region more reliable than the face? 
A preliminary study of face-based recognition on a transgender dataset</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>1</td><td>3</td><td>5</td></tr><tr><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td><td>mafa</td><td>MAsked FAces</td><td><a href="papers/9cc8cf0c7d7fa7607659921b6ff657e17e135ecc.html" target="_blank">Detecting Masked Faces in the Wild with LLE-CNNs</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>1</td></tr><tr><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td><td>megaage</td><td>MegaAge</td><td><a href="papers/c72a2ea819df9b0e8cd267eebcc6528b8741e03d.html" target="_blank">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>4</td><td>4</td><td>0</td><td>0</td><td>4</td><td>0</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html" target="_blank">Spoofing faces using makeup: An investigative study</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>France</td><td>43.61581310</td><td>7.06838000</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>0</td><td>1</td><td>5</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html" target="_blank">PETS 2017: Dataset and Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>1</td><td>8</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html" target="_blank">ReSEED: social event dEtection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>1</td><td>5</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td><td>imdb_face</td><td>IMDb Face</td><td><a href="papers/9e31e77f9543ab42474ba4e9330676e18c242e72.html" target="_blank">The Devil of Face Recognition is in the Noise</a></td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Nanyang Technological 
University</td><td>Singapore</td><td>1.34841040</td><td>103.68297965</td><td>50%</td><td>6</td><td>3</td><td>3</td><td>0</td><td>4</td><td>1</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td><td>mr2</td><td>MR2</td><td><a href="papers/578d4ad74818086bb64f182f72e2c8bd31e3d426.html" target="_blank">The MR2: A multi-racial, mega-resolution database of facial stimuli.</a></td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>7</td><td>0</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html" target="_blank">Investigating Open-World Person Re-identification Using a Drone</a></td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>5</td><td>2</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>3531332efe19be21e7401ba1f04570a142617236</td><td>ufdd</td><td>UFDD</td><td><a href="papers/3531332efe19be21e7401ba1f04570a142617236.html" target="_blank">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>4</td><td>3</td><td>1</td><td>0</td><td>4</td><td>0</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html" target="_blank">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td><td>gfw</td><td>Grouping Face in the Wild</td><td><a href="papers/e58dd160a76349d46f881bd6ddbc2921f08d1050.html" target="_blank">Merge or Not? 
Learning to Group Faces via Imitation Learning</a></td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>2</td><td>0</td></tr><tr><td>4eab317b5ac436a949849ed286baa3de2a541eef</td><td>laofiw</td><td>LAOFIW</td><td><a href="papers/4eab317b5ac436a949849ed286baa3de2a541eef.html" target="_blank">Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</a></td><td><a href="https://arxiv.org/pdf/1809.02169.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html" target="_blank">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461.html" target="_blank">A 3D Dynamic Database for Unconstrained Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43.html" target="_blank">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html" target="_blank">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html" target="_blank">Indian Face Age Database: A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html" target="_blank">Crowdsourcing facial expressions for 
affective-interaction</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/2306b2a8fba28539306052764a77a0d0f5d1236a.html" target="_blank">Surveillance Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html" target="_blank">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td>WLFDB</td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html" target="_blank">WLFDB : Weakly Labeled Face Databases</a></td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/7b92d1e53cc87f7a4256695de590098a2f30261e.html" target="_blank">From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html" target="_blank">Pedestrian detection: A benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/15e1af79939dbf90790b03d8aa02477783fb1d0f.html" target="_blank">Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</a></td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>72a155c987816ae81c858fddbd6beab656d86220</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/72a155c987816ae81c858fddbd6beab656d86220.html" target="_blank">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html" target="_blank">Face swapping: automatically replacing faces in photographs</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/12ad3b5bbbf407f8e54ea692c07633d1a867c566.html" target="_blank">Object recognition using segmentation for feature detection</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 17th International Conference on Pattern Recognition, 2004. ICPR 2004.</td><td>edu</td><td>Inst. of Comput. Sci., Univ. of Leoben, Austria</td><td>Austria</td><td>47.38473720</td><td>15.09302010</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html" target="_blank">HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</a></td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td><td>ibm_dif</td><td>IBM Diversity in Faces</td><td><a href="papers/0ab7cff2ccda7269b73ff6efd9d37e1318f7db25.html" target="_blank">Facial Coding Scheme Reference 1 Craniofacial Distances</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html" target="_blank">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html" target="_blank">Understanding images of groups of people</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfpw</td><td>LFPW</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html" target="_blank">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine 
Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html" target="_blank">Component-Based Face Recognition with 3D Morphable Models</a></td><td><span class="gray">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html" target="_blank">Names and faces in the news</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td><td>scut_head</td><td>SCUT HEAD</td><td><a href="papers/d3200d49a19a4a4e4e9745ee39649b65d80c834b.html" target="_blank">Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</a></td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td>2018 24th International Conference on Pattern Recognition (ICPR)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>8990cdce3f917dad622e43e033db686b354d057c</td><td>tiny_faces</td><td>TinyFace</td><td><a href="papers/8990cdce3f917dad622e43e033db686b354d057c.html" target="_blank">Low-Resolution Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html" target="_blank">VQA: Visual Question Answering</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a 
href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html" target="_blank">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Kentucky</td><td>United States</td><td>38.03337420</td><td>-84.50177580</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/36bccfb2ad847096bc76777e544f305813cd8f5b.html" target="_blank">WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr></table></body></html>
\ No newline at end of file +<!doctype html><html><head><meta charset='utf-8'><title>Coverage</title><link rel='stylesheet' href='reports.css'></head><body><h2>Coverage</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Country</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html" target="_blank">Face detection, pose estimation, and landmark localization in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>999</td><td>709</td><td>290</td><td>35</td><td>576</td><td>422</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html" target="_blank">Deep Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>999</td><td>646</td><td>353</td><td>48</td><td>558</td><td>429</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html" target="_blank">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>999</td><td>643</td><td>356</td><td>56</td><td>628</td><td>362</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html" target="_blank">Labeled Faces in the Wild: A Database forStudying Face Recognition in Unconstrained Environments</a></td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>999</td><td>632</td><td>367</td><td>59</td><td>598</td><td>382</td></tr><tr><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td><td>coco</td><td>COCO</td><td><a href="papers/5e0f8c355a37a5a89351c02f174e7a5ddcb98683.html" target="_blank">Microsoft COCO: Common Objects in Context</a></td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>608</td><td>391</td><td>25</td><td>722</td><td>259</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html" target="_blank">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td>edu</td><td>University of Pittsburgh</td><td>United 
States</td><td>40.44415295</td><td>-79.96243993</td><td>61%</td><td>999</td><td>607</td><td>392</td><td>57</td><td>470</td><td>518</td></tr><tr><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td><td>voc</td><td>VOC</td><td><a href="papers/0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a.html" target="_blank">The Pascal Visual Object Classes (VOC) Challenge</a></td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>61%</td><td>999</td><td>607</td><td>391</td><td>28</td><td>557</td><td>422</td></tr><tr><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td><td>kitti</td><td>KITTI</td><td><a href="papers/026e3363b7f76b51cc711886597a44d5f1fd1de2.html" target="_blank">Vision meets robotics: The KITTI dataset</a></td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td>I. J. Robotics Res.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>999</td><td>602</td><td>397</td><td>36</td><td>553</td><td>462</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html" target="_blank">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>60%</td><td>999</td><td>600</td><td>399</td><td>69</td><td>527</td><td>466</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html" target="_blank">Attribute and simile classifiers for face verification</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>914</td><td>585</td><td>329</td><td>47</td><td>586</td><td>316</td></tr><tr><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/6d96f946aaabc734af7fe3fc4454cf8547fcd5ed.html" target="_blank">The AR face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>999</td><td>579</td><td>420</td><td>58</td><td>458</td><td>530</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html" target="_blank">80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>999</td><td>574</td><td>425</td><td>89</td><td>644</td><td>337</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html" target="_blank">Histograms of oriented gradients for human detection</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>INRIA 
Rhone-Alps, Montbonnot, France</td><td>France</td><td>45.21788600</td><td>5.80736900</td><td>57%</td><td>999</td><td>573</td><td>426</td><td>41</td><td>419</td><td>509</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html" target="_blank">Overview of the face recognition grand challenge</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>NIST</td><td>United States</td><td>39.14004000</td><td>-77.21850600</td><td>57%</td><td>999</td><td>568</td><td>430</td><td>85</td><td>549</td><td>442</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html" target="_blank">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>999</td><td>561</td><td>438</td><td>66</td><td>498</td><td>462</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html" target="_blank">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>554</td><td>445</td><td>94</td><td>495</td><td>491</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html" target="_blank">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>553</td><td>446</td><td>69</td><td>540</td><td>439</td></tr><tr><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/b62628ac06bbac998a3ab825324a41a11bc3a988.html" target="_blank">XM2VTSDB : The extended M2VTS database</a></td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>864</td><td>539</td><td>325</td><td>37</td><td>493</td><td>404</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html" target="_blank">Deep Learning Face Attributes in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>57%</td><td>919</td><td>526</td><td>392</td><td>61</td><td>694</td><td>201</td></tr><tr><td>dc8b25e35a3acb812beb499844734081722319b4</td><td>feret</td><td>FERET</td><td><a href="papers/dc8b25e35a3acb812beb499844734081722319b4.html" target="_blank">The FERET database and evaluation procedure for face-recognition 
algorithms</a></td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>521</td><td>478</td><td>103</td><td>591</td><td>421</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html" target="_blank">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>899</td><td>508</td><td>391</td><td>51</td><td>431</td><td>451</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html" target="_blank">Parameterisation of a stochastic model for human face identification</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>999</td><td>501</td><td>498</td><td>94</td><td>543</td><td>427</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>449</td><td>310</td><td>49</td><td>404</td><td>345</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>449</td><td>310</td><td>49</td><td>404</td><td>345</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html" target="_blank">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>716</td><td>415</td><td>301</td><td>59</td><td>492</td><td>222</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html" target="_blank">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of California, Santa Cruz</td><td>United States</td><td>36.99158470</td><td>-122.05827710</td><td>66%</td><td>624</td><td>414</td><td>210</td><td>33</td><td>342</td><td>276</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html" target="_blank">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on 
Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>568</td><td>412</td><td>156</td><td>19</td><td>320</td><td>235</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html" target="_blank">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>632</td><td>366</td><td>264</td><td>44</td><td>358</td><td>264</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html" target="_blank">Scalable Person Re-identification: A Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>77%</td><td>460</td><td>355</td><td>105</td><td>9</td><td>263</td><td>185</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html" target="_blank">Learning Face Representation from Scratch</a></td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese Academy of Sciences</td><td>China</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>476</td><td>339</td><td>137</td><td>19</td><td>290</td><td>182</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html" target="_blank">Face recognition in unconstrained videos with matched background similarity</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Tel Aviv University</td><td>Israel</td><td>32.11198890</td><td>34.80459702</td><td>66%</td><td>509</td><td>338</td><td>170</td><td>23</td><td>294</td><td>216</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on 
Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html" target="_blank">A 3D facial expression database for facial behavior research</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>588</td><td>316</td><td>271</td><td>44</td><td>306</td><td>282</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html" target="_blank">Interactive Facial Feature Localization</a></td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Adobe</td><td>United States</td><td>37.33077030</td><td>-121.89409510</td><td>85%</td><td>352</td><td>298</td><td>54</td><td>8</td><td>212</td><td>146</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html" target="_blank">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>511</td><td>289</td><td>222</td><td>49</td><td>329</td><td>182</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>407</td><td>283</td><td>124</td><td>16</td><td>252</td><td>153</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>407</td><td>283</td><td>124</td><td>16</td><td>252</td><td>153</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html" target="_blank">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>386</td><td>263</td><td>123</td><td>23</td><td>204</td><td>180</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and 
Gesture Recognition (FGR06)</td><td>edu</td><td>North Carolina University</td><td>United States</td><td>34.22398690</td><td>-77.87013250</td><td>59%</td><td>437</td><td>258</td><td>178</td><td>22</td><td>228</td><td>203</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>North Carolina University</td><td>United States</td><td>34.22398690</td><td>-77.87013250</td><td>59%</td><td>437</td><td>258</td><td>178</td><td>22</td><td>228</td><td>203</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html" target="_blank">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>79%</td><td>323</td><td>255</td><td>68</td><td>15</td><td>208</td><td>120</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html" target="_blank">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>429</td><td>254</td><td>175</td><td>38</td><td>198</td><td>234</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html" target="_blank">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>387</td><td>254</td><td>133</td><td>19</td><td>291</td><td>96</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html" target="_blank">Web-based database for facial expression analysis</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>464</td><td>250</td><td>214</td><td>45</td><td>282</td><td>188</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html" target="_blank">FDDB: A benchmark for face detection in unconstrained settings</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>380</td><td>247</td><td>133</td><td>16</td><td>202</td><td>164</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html" target="_blank">Presentation and validation of the Radboud Faces Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>487</td><td>234</td><td>253</td><td>39</td><td>342</td><td>144</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html" target="_blank">Robust Face Landmark Estimation under Occlusion</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>72%</td><td>325</td><td>233</td><td>92</td><td>12</td><td>194</td><td>133</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>369</td><td>227</td><td>141</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>369</td><td>227</td><td>141</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>369</td><td>227</td><td>141</td><td>32</td><td>237</td><td>131</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>oxford_town_centre</td><td>TownCentre</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html" target="_blank">Stable multi-target tracking in real-time surveillance video</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Oxford</td><td>United Kingdom</td><td>51.75345380</td><td>-1.25400997</td><td>68%</td><td>328</td><td>222</td><td>106</td><td>13</td><td>186</td><td>140</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html" target="_blank">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>69%</td><td>318</td><td>218</td><td>100</td><td>27</td><td>211</td><td>107</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html" target="_blank">Depth and Appearance for Mobile Scene Analysis</a></td><td><span 
class="gray">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>63%</td><td>324</td><td>204</td><td>120</td><td>26</td><td>193</td><td>127</td></tr><tr><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td><td>mot</td><td>MOT</td><td><a href="papers/5981e6479c3fd4e31644db35d236bfb84ae46514.html" target="_blank">Learning to associate: HybridBoosted multi-target tracker for crowded scene</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of Southern California</td><td>United States</td><td>34.02241490</td><td>-118.28634407</td><td>61%</td><td>326</td><td>200</td><td>125</td><td>22</td><td>190</td><td>137</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html" target="_blank">Bosphorus Database for 3D Face Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>352</td><td>198</td><td>154</td><td>17</td><td>162</td><td>188</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html" target="_blank">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>343</td><td>194</td><td>149</td><td>23</td><td>223</td><td>114</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html" target="_blank">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>280</td><td>194</td><td>86</td><td>9</td><td>139</td><td>137</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>285</td><td>188</td><td>97</td><td>11</td><td>197</td><td>93</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>285</td><td>188</td><td>97</td><td>11</td><td>197</td><td>93</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 
3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>60%</td><td>311</td><td>186</td><td>125</td><td>33</td><td>208</td><td>105</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>60%</td><td>311</td><td>186</td><td>125</td><td>33</td><td>208</td><td>105</td></tr><tr><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/010f0f4929e6a6644fb01f0e43820f91d0fad292.html" target="_blank">YFCC100M: the new data in multimedia research</a></td><td><span class="gray">[pdf]</a></td><td>Commun. ACM</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>64%</td><td>274</td><td>176</td><td>98</td><td>23</td><td>172</td><td>100</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html" target="_blank">Locally Aligned Feature Transforms across Views</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>258</td><td>164</td><td>94</td><td>15</td><td>136</td><td>117</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html" target="_blank">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Brown University</td><td>United States</td><td>41.82686820</td><td>-71.40123146</td><td>60%</td><td>264</td><td>159</td><td>105</td><td>27</td><td>206</td><td>56</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html" target="_blank">Age and Gender Estimation of Unfiltered Faces</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>87%</td><td>179</td><td>156</td><td>23</td><td>1</td><td>98</td><td>80</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html" target="_blank">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>237</td><td>156</td><td>81</td><td>14</td><td>159</td><td>76</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a 
href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html" target="_blank">Generic object recognition with boosting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>53%</td><td>293</td><td>155</td><td>138</td><td>16</td><td>195</td><td>97</td></tr><tr><td>6204776d31359d129a582057c2d788a14f8aadeb</td><td>youtube_celebrities</td><td>YouTube Celebrities</td><td><a href="papers/6204776d31359d129a582057c2d788a14f8aadeb.html" target="_blank">Face tracking and recognition with visual constraints in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Rutgers University</td><td>United States</td><td>40.47913175</td><td>-74.43168868</td><td>57%</td><td>267</td><td>151</td><td>115</td><td>11</td><td>125</td><td>121</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html" target="_blank">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>184</td><td>148</td><td>36</td><td>8</td><td>120</td><td>67</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html" target="_blank">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>225</td><td>144</td><td>81</td><td>17</td><td>146</td><td>77</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html" target="_blank">Recognition using visual phrases</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United States</td><td>40.11116745</td><td>-88.22587665</td><td>58%</td><td>246</td><td>143</td><td>103</td><td>17</td><td>170</td><td>68</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html" target="_blank">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>78%</td><td>180</td><td>141</td><td>39</td><td>8</td><td>120</td><td>59</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>bpad</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html" target="_blank">Describing people: A poselet-based approach to attribute classification</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>230</td><td>140</td><td>90</td><td>14</td><td>163</td><td>66</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html" target="_blank">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>197</td><td>132</td><td>65</td><td>15</td><td>108</td><td>88</td></tr><tr><td>0c91808994a250d7be332400a534a9291ca3b60e</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/0c91808994a250d7be332400a534a9291ca3b60e.html" target="_blank">Weak Hypotheses and Boosting for Generic Object Detection and Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>236</td><td>131</td><td>105</td><td>17</td><td>161</td><td>77</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html" target="_blank">Exploring Models and Data for Image Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>206</td><td>126</td><td>80</td><td>11</td><td>162</td><td>39</td></tr><tr><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/570f37ed63142312e6ccdf00ecc376341ec72b9f.html" target="_blank">Social LSTM: Human Trajectory Prediction in Crowded Spaces</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>224</td><td>125</td><td>99</td><td>3</td><td>140</td><td>81</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html" target="_blank">Learning effective human pose estimation from inaccurate annotation</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Leeds</td><td>United Kingdom</td><td>53.80387185</td><td>-1.55245712</td><td>71%</td><td>169</td><td>120</td><td>49</td><td>7</td><td>108</td><td>65</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html" target="_blank">WIDER FACE: A Face Detection Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>66%</td><td>178</td><td>118</td><td>60</td><td>11</td><td>112</td><td>66</td></tr><tr><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/22ad2c8c0f4d6aa4328b38d894b814ec22579761.html" target="_blank">Clothing cosegmentation for recognizing people</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>65%</td><td>178</td><td>116</td><td>62</td><td>7</td><td>100</td><td>86</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html" target="_blank">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><span class="gray">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>Australia</td><td>-35.27769990</td><td>149.11852700</td><td>64%</td><td>181</td><td>115</td><td>66</td><td>8</td><td>87</td><td>97</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html" target="_blank">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>168</td><td>115</td><td>53</td><td>4</td><td>97</td><td>69</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html" target="_blank">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><span 
class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>148</td><td>113</td><td>35</td><td>3</td><td>80</td><td>65</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html" target="_blank">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>176</td><td>112</td><td>64</td><td>2</td><td>113</td><td>62</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw</td><td>LFW</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html" target="_blank">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>183</td><td>109</td><td>74</td><td>13</td><td>103</td><td>77</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html" target="_blank">Appearance-based gaze estimation in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>149</td><td>109</td><td>40</td><td>3</td><td>94</td><td>54</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html" target="_blank">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>139</td><td>106</td><td>33</td><td>5</td><td>100</td><td>37</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB-Wiki</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html" target="_blank">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>72%</td><td>145</td><td>105</td><td>40</td><td>9</td><td>93</td><td>51</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html" target="_blank">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><span class="gray">[pdf]</a></td><td>Face and Gesture 2011</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>54%</td><td>189</td><td>103</td><td>86</td><td>21</td><td>108</td><td>78</td></tr><tr><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td><td>scface</td><td>SCface</td><td><a href="papers/29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d.html" target="_blank">SCface – surveillance 
cameras face database</a></td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td>Multimedia Tools and Applications</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>179</td><td>101</td><td>78</td><td>15</td><td>88</td><td>89</td></tr><tr><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td><td>disfa</td><td>DISFA</td><td><a href="papers/5a5f0287484f0d480fed1ce585dbf729586f0edc.html" target="_blank">DISFA: A Spontaneous Facial Action Intensity Database</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Affective Computing</td><td>edu</td><td>University of Denver</td><td>United States</td><td>39.67665410</td><td>-104.96220300</td><td>54%</td><td>184</td><td>100</td><td>84</td><td>17</td><td>96</td><td>89</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html" target="_blank">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>148</td><td>99</td><td>49</td><td>7</td><td>105</td><td>43</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html" target="_blank">Multi-camera activity correlation analysis</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>69%</td><td>142</td><td>98</td><td>44</td><td>7</td><td>77</td><td>64</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB-Wiki</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html" target="_blank">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>79%</td><td>122</td><td>96</td><td>26</td><td>4</td><td>75</td><td>48</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html" target="_blank">On a Large Sequence-Based Human Gait Database</a></td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>150</td><td>95</td><td>55</td><td>17</td><td>103</td><td>51</td></tr><tr><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td><td>fei</td><td>FEI</td><td><a href="papers/8b56e33f33e582f3e473dba573a16b598ed9bcdc.html" target="_blank">A new ranking method for principal components analysis and its application to face image analysis</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>169</td><td>93</td><td>76</td><td>6</td><td>69</td><td>102</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html" target="_blank">300 Faces In-The-Wild Challenge: database and results</a></td><td><a 
href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td>edu</td><td>Imperial College London</td><td>United Kingdom</td><td>51.49887085</td><td>-0.17560797</td><td>71%</td><td>129</td><td>92</td><td>37</td><td>6</td><td>74</td><td>55</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html" target="_blank">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>168</td><td>91</td><td>77</td><td>10</td><td>85</td><td>79</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html" target="_blank">A data-driven approach to cleaning large face datasets</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>138</td><td>89</td><td>49</td><td>1</td><td>95</td><td>41</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html" target="_blank">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Massachusetts</td><td>United States</td><td>42.38897850</td><td>-72.52869870</td><td>69%</td><td>123</td><td>85</td><td>38</td><td>3</td><td>71</td><td>51</td></tr><tr><td>5a4df9bef1872865f0b619ac3aacc97f49e4a035</td><td>cuhk_train_station</td><td>CUHK Train Station Dataset</td><td><a href="papers/5a4df9bef1872865f0b619ac3aacc97f49e4a035.html" target="_blank">Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>60%</td><td>141</td><td>84</td><td>57</td><td>4</td><td>60</td><td>75</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html" target="_blank">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>138</td><td>83</td><td>55</td><td>6</td><td>76</td><td>63</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html" target="_blank">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>133</td><td>82</td><td>51</td><td>9</td><td>73</td><td>58</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html" target="_blank">A high-resolution spontaneous 3D 
dynamic facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>United States</td><td>42.08779975</td><td>-75.97066066</td><td>52%</td><td>154</td><td>80</td><td>74</td><td>6</td><td>80</td><td>75</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>108</td><td>77</td><td>31</td><td>7</td><td>66</td><td>44</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>108</td><td>77</td><td>31</td><td>7</td><td>66</td><td>44</td></tr><tr><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/7f23a4bb0c777dd72cca7665a5f370ac7980217e.html" target="_blank">Improving Person Re-identification by Attribute and Identity Learning</a></td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>84%</td><td>87</td><td>73</td><td>14</td><td>0</td><td>43</td><td>42</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html" target="_blank">Labeled Faces in the Wild: A Survey</a></td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Stevens Institute of Technology</td><td>United States</td><td>40.74225200</td><td>-74.02709490</td><td>64%</td><td>109</td><td>70</td><td>39</td><td>7</td><td>66</td><td>43</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html" target="_blank">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>116</td><td>70</td><td>46</td><td>14</td><td>84</td><td>31</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html" target="_blank">Pedestrian Attribute Recognition At Far Distance</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>88</td><td>66</td><td>22</td><td>1</td><td>50</td><td>36</td></tr><tr><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/70c59dc3470ae867016f6ab0e008ac8ba03774a1.html" target="_blank">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a 
href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>83</td><td>66</td><td>17</td><td>3</td><td>61</td><td>20</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html" target="_blank">Object Detection Combining Recognition and Segmentation</a></td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>105</td><td>64</td><td>41</td><td>9</td><td>58</td><td>43</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html" target="_blank">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>95</td><td>63</td><td>32</td><td>6</td><td>50</td><td>45</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html" target="_blank">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>95</td><td>61</td><td>34</td><td>4</td><td>59</td><td>35</td></tr><tr><td>06f02199690961ba52997cde1527e714d2b3bf8f</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/06f02199690961ba52997cde1527e714d2b3bf8f.html" target="_blank">Gaze locking: passive eye contact detection for human-object interaction</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>76%</td><td>79</td><td>60</td><td>19</td><td>0</td><td>49</td><td>34</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html" target="_blank">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>99</td><td>59</td><td>40</td><td>1</td><td>73</td><td>21</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html" target="_blank">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>City University of New York</td><td>United States</td><td>40.87228250</td><td>-73.89489171</td><td>51%</td><td>115</td><td>59</td><td>56</td><td>8</td><td>75</td><td>37</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a 
href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html" target="_blank">Understanding Kin Relationships in a Photo</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>94</td><td>59</td><td>35</td><td>1</td><td>33</td><td>61</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html" target="_blank">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>91</td><td>57</td><td>34</td><td>5</td><td>60</td><td>31</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html" target="_blank">Violent flows: Real-time detection of violent crowd behavior</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>65%</td><td>88</td><td>57</td><td>31</td><td>6</td><td>45</td><td>44</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html" target="_blank">High Five: Recognising human interactions in TV shows</a></td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>98</td><td>56</td><td>42</td><td>10</td><td>66</td><td>28</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html" target="_blank">Automatic 3D face authentication</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>100</td><td>54</td><td>46</td><td>8</td><td>63</td><td>36</td></tr><tr><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td><td>put_face</td><td>Put Face</td><td><a href="papers/ae0aee03d946efffdc7af2362a42d3750e7dd48a.html" target="_blank">The put face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>99</td><td>54</td><td>45</td><td>5</td><td>55</td><td>48</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html" target="_blank">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>63%</td><td>84</td><td>53</td><td>31</td><td>4</td><td>51</td><td>33</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html" target="_blank">Person Re-identification in the 
Wild</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>77</td><td>52</td><td>25</td><td>1</td><td>47</td><td>27</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html" target="_blank">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>78</td><td>51</td><td>27</td><td>5</td><td>54</td><td>23</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html" target="_blank">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td>edu</td><td>University of North Carolina at Chapel Hill</td><td>United States</td><td>35.91139710</td><td>-79.05045290</td><td>61%</td><td>82</td><td>50</td><td>32</td><td>6</td><td>28</td><td>52</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html" target="_blank">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>91%</td><td>54</td><td>49</td><td>4</td><td>1</td><td>41</td><td>12</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html" target="_blank">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>86</td><td>45</td><td>41</td><td>7</td><td>54</td><td>29</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html" target="_blank">The MUG facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>Greece</td><td>40.62984145</td><td>22.95889350</td><td>55%</td><td>82</td><td>45</td><td>37</td><td>4</td><td>34</td><td>47</td></tr><tr><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/1bd1645a629f1b612960ab9bba276afd4cf7c666.html" target="_blank">End-to-End People Detection in Crowded Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Stanford University</td><td>United 
States</td><td>37.43131385</td><td>-122.16936535</td><td>66%</td><td>67</td><td>44</td><td>22</td><td>2</td><td>42</td><td>23</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html" target="_blank">Pruning training sets for learning of object categories</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>63</td><td>44</td><td>19</td><td>4</td><td>42</td><td>20</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html" target="_blank">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>78</td><td>43</td><td>35</td><td>8</td><td>44</td><td>31</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html" target="_blank">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>60</td><td>42</td><td>18</td><td>0</td><td>34</td><td>28</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html" target="_blank">Kinship Verification through Transfer Learning</a></td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>71</td><td>41</td><td>30</td><td>2</td><td>29</td><td>42</td></tr><tr><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td><td>pa_100k</td><td>PA-100K</td><td><a href="papers/f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44.html" target="_blank">HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>55</td><td>41</td><td>14</td><td>0</td><td>36</td><td>17</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html" target="_blank">Automated Human Identification Using Ear Imaging</a></td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>80</td><td>40</td><td>40</td><td>6</td><td>35</td><td>44</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html" target="_blank">Texas 3D Face Recognition Database</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation 
(SSIAI)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>66</td><td>40</td><td>26</td><td>3</td><td>40</td><td>27</td></tr><tr><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td><td>feret</td><td>FERET</td><td><a href="papers/31de9b3dd6106ce6eec9a35991b2b9083395fd0b.html" target="_blank">FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</a></td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>75</td><td>39</td><td>36</td><td>5</td><td>54</td><td>20</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html" target="_blank">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>83</td><td>38</td><td>45</td><td>6</td><td>43</td><td>39</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html" target="_blank">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><span class="gray">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>65</td><td>38</td><td>27</td><td>6</td><td>45</td><td>20</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html" target="_blank">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>61</td><td>37</td><td>24</td><td>0</td><td>43</td><td>16</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html" target="_blank">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>67</td><td>36</td><td>31</td><td>4</td><td>29</td><td>28</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html" target="_blank">Re-identify people in wide area camera network</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Udine</td><td>Italy</td><td>46.08107230</td><td>13.21194740</td><td>60%</td><td>60</td><td>36</td><td>24</td><td>1</td><td>38</td><td>21</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" 
target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html" target="_blank">Consistent Re-identification in a Camera Network</a></td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>49</td><td>35</td><td>14</td><td>3</td><td>34</td><td>13</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html" target="_blank">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>52</td><td>34</td><td>18</td><td>1</td><td>46</td><td>6</td></tr><tr><td>8be57cdad86fdf8c8290df4ca3149592f3c46dd3</td><td>m2vts</td><td>m2vts</td><td><a href="papers/8be57cdad86fdf8c8290df4ca3149592f3c46dd3.html" target="_blank">The M2VTS Multimodal Face Database (Release 1.00)</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>73</td><td>33</td><td>40</td><td>2</td><td>39</td><td>33</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html" target="_blank">UMDFaces: An annotated face dataset for training deep networks</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td>edu</td><td>University of Maryland</td><td>United States</td><td>39.28996850</td><td>-76.62196103</td><td>79%</td><td>42</td><td>33</td><td>9</td><td>2</td><td>30</td><td>11</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a 
href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html" target="_blank">End-to-End Deep Learning for Person Search</a></td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>46</td><td>32</td><td>14</td><td>0</td><td>27</td><td>16</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/18858cc936947fc96b5c06bbe3c6c2faa5614540.html" target="_blank">Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</a></td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>59</td><td>31</td><td>28</td><td>0</td><td>47</td><td>10</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html" target="_blank">UMB-DB: A database of partially occluded 3D faces</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>47</td><td>31</td><td>16</td><td>2</td><td>22</td><td>24</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html" target="_blank">Recognize complex events from static images by fusing deep channels</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition 
(CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>44</td><td>31</td><td>13</td><td>1</td><td>29</td><td>15</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html" target="_blank">The intrinsic memorability of face photographs.</a></td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td>Journal of experimental psychology. General</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>52</td><td>29</td><td>23</td><td>2</td><td>36</td><td>14</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html" target="_blank">Level Playing Field for Million Scale Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>United States</td><td>47.65432380</td><td>-122.30800894</td><td>72%</td><td>39</td><td>28</td><td>11</td><td>2</td><td>29</td><td>9</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html" target="_blank">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>50</td><td>26</td><td>24</td><td>5</td><td>31</td><td>18</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html" target="_blank">The CMU Face In Action (FIA) Database</a></td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>54</td><td>26</td><td>28</td><td>5</td><td>40</td><td>16</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html" target="_blank">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>0</td><td>18</td><td>16</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html" target="_blank">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>2</td><td>21</td><td>12</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html" target="_blank">Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>47</td><td>24</td><td>23</td><td>1</td><td>23</td><td>24</td></tr><tr><td>8875ae233bc074f5cd6c4ebba447b536a7e847a5</td><td>voxceleb2</td><td>VoxCeleb2</td><td><a href="papers/8875ae233bc074f5cd6c4ebba447b536a7e847a5.html" target="_blank">VoxCeleb2: Deep Speaker Recognition.</a></td><td><a href="https://pdfs.semanticscholar.org/8875/ae233bc074f5cd6c4ebba447b536a7e847a5.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>34</td><td>24</td><td>9</td><td>2</td><td>31</td><td>2</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/758d7e1be64cc668c59ef33ba8882c8597406e53.html" target="_blank">AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</a></td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>37</td><td>23</td><td>14</td><td>0</td><td>25</td><td>11</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html" target="_blank">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>2</td><td>26</td><td>15</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html" target="_blank">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>0</td><td>17</td><td>26</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html" target="_blank">Personalizing Human Video Pose Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Oxford University</td><td>United Kingdom</td><td>51.75208490</td><td>-1.25166460</td><td>64%</td><td>36</td><td>23</td><td>13</td><td>2</td><td>30</td><td>8</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html" target="_blank">IARPA Janus Benchmark-B Face Dataset</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Michigan State University</td><td>United States</td><td>42.71856800</td><td>-84.47791571</td><td>63%</td><td>35</td><td>22</td><td>13</td><td>3</td><td>25</td><td>8</td></tr><tr><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/41976ebc8ab76d9a6861487c97cc7fcbe3b6015f.html" target="_blank">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Columbia University</td><td>United 
States</td><td>40.84198360</td><td>-73.94368971</td><td>76%</td><td>29</td><td>22</td><td>7</td><td>2</td><td>27</td><td>2</td></tr><tr><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/a0cc5f73a37723a6dd465924143f1cb4976d0169.html" target="_blank">Person Transfer GAN to Bridge Domain Gap for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>92%</td><td>24</td><td>22</td><td>2</td><td>1</td><td>20</td><td>4</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html" target="_blank">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td>edu</td><td>University of Notre Dame</td><td>United States</td><td>41.70456775</td><td>-86.23822026</td><td>63%</td><td>35</td><td>22</td><td>13</td><td>3</td><td>18</td><td>15</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html" target="_blank">Ear Recognition: More Than a Survey</a></td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" target="_blank">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td></td><td>77%</td><td>26</td><td>20</td><td>6</td><td>0</td><td>10</td><td>16</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>#N/A</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html" target="_blank">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>37</td><td>20</td><td>17</td><td>3</td><td>30</td><td>7</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>33</td><td>20</td><td>13</td><td>1</td><td>23</td><td>11</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>33</td><td>20</td><td>13</td><td>1</td><td>23</td><td>11</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html" target="_blank">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>35</td><td>20</td><td>15</td><td>2</td><td>21</td><td>14</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html" target="_blank">Fashion Landmark Detection in the Wild</a></td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>26</td><td>19</td><td>7</td><td>1</td><td>16</td><td>10</td></tr><tr><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/64e0690dd176a93de9d4328f6e31fc4afe1e7536.html" target="_blank">Tracking Multiple People Online and in Real Time</a></td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>23</td><td>18</td><td>5</td><td>1</td><td>12</td><td>10</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html" target="_blank">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>26</td><td>18</td><td>8</td><td>0</td><td>16</td><td>10</td></tr><tr><td>d818568838433a6d6831adde49a58cef05e0c89f</td><td>agedb</td><td>AgeDB</td><td><a href="papers/d818568838433a6d6831adde49a58cef05e0c89f.html" target="_blank">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Imperial College London</td><td>United Kingdom</td><td>51.49887085</td><td>-0.17560797</td><td>94%</td><td>18</td><td>17</td><td>1</td><td>0</td><td>14</td><td>3</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html" target="_blank">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td>International 
Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>1</td><td>16</td><td>11</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html" target="_blank">Describing Common Human Visual Actions in Images</a></td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>25</td><td>17</td><td>8</td><td>0</td><td>23</td><td>2</td></tr><tr><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/d178cde92ab3dc0dd2ebee5a76a33d556c39448b.html" target="_blank">The jiku mobile video dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>National University of Singapore</td><td>Singapore</td><td>1.29620180</td><td>103.77689944</td><td>71%</td><td>24</td><td>17</td><td>7</td><td>0</td><td>6</td><td>19</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html" target="_blank">Genealogical face recognition based on UB KinFace database</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>United States</td><td>42.93362780</td><td>-78.88394479</td><td>55%</td><td>31</td><td>17</td><td>14</td><td>0</td><td>11</td><td>21</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html" target="_blank">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>26</td><td>16</td><td>10</td><td>2</td><td>21</td><td>6</td></tr><tr><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td><td>umd_faces</td><td>UMD</td><td><a href="papers/71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6.html" target="_blank">The Do’s and Don’ts for CNN-Based Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision Workshops 
(ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>26</td><td>16</td><td>10</td><td>2</td><td>16</td><td>8</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html" target="_blank">Three-dimensional face recognition: an eigensurface approach</a></td><td><span class="gray">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. ICIP '04.</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>38</td><td>16</td><td>22</td><td>4</td><td>24</td><td>13</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>erce</td><td>ERCe</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html" target="_blank">Learning Social Relation Traits from Face Images</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>23</td><td>14</td><td>9</td><td>4</td><td>16</td><td>7</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>fpoq</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html" target="_blank">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>81%</td><td>16</td><td>13</td><td>3</td><td>0</td><td>13</td><td>4</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html" target="_blank">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>18</td><td>13</td><td>5</td><td>0</td><td>14</td><td>4</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html" target="_blank">Fine-grained evaluation on face detection in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition 
(FG)</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>12</td><td>5</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html" target="_blank">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>1</td><td>12</td><td>6</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html" target="_blank">YawDD: a yawning detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>15</td><td>12</td><td>3</td><td>1</td><td>2</td><td>13</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html" target="_blank">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>2</td><td>18</td><td>3</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html" target="_blank">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>22</td><td>11</td><td>11</td><td>3</td><td>11</td><td>10</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html" target="_blank">Iranian Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</a></td><td>2007 International Conference on Machine Vision</td><td>edu</td><td>Islamic Azad University</td><td>Iran</td><td>34.84529990</td><td>48.55962120</td><td>48%</td><td>23</td><td>11</td><td>12</td><td>2</td><td>14</td><td>9</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html" target="_blank">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><span class="gray">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td></td><td>79%</td><td>14</td><td>11</td><td>3</td><td>0</td><td>12</td><td>1</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html" target="_blank">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td>edu</td><td>BVBCET, Hubli, 
India</td><td>India</td><td>15.36883320</td><td>75.12137960</td><td>65%</td><td>17</td><td>11</td><td>6</td><td>0</td><td>11</td><td>5</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html" target="_blank">Recognizing disguised faces</a></td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>18</td><td>10</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html" target="_blank">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>3</td><td>18</td><td>9</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html" target="_blank">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>United States</td><td>34.22498270</td><td>-77.86907744</td><td>77%</td><td>13</td><td>10</td><td>3</td><td>0</td><td>6</td><td>8</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html" target="_blank">VADANA: A dense dataset for facial image analysis</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>University of Delaware</td><td>United States</td><td>39.68103280</td><td>-75.75401840</td><td>67%</td><td>15</td><td>10</td><td>5</td><td>0</td><td>5</td><td>10</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html" target="_blank">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>18</td><td>9</td><td>9</td><td>0</td><td>12</td><td>5</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html" target="_blank">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>20</td><td>9</td><td>11</td><td>0</td><td>9</td><td>11</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html" target="_blank">ChaLearn looking at people: A review of events and resources</a></td><td><span class="gray">[pdf]</a></td><td>2017 International 
Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>13</td><td>9</td><td>4</td><td>1</td><td>8</td><td>4</td></tr><tr><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/a8d0b149c2eadaa02204d3e4356fbc8eccf3b315.html" target="_blank">Hi4D-ADSIP 3-D dynamic facial articulation database</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>1</td><td>4</td><td>11</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html" target="_blank">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>19</td><td>9</td><td>10</td><td>2</td><td>6</td><td>13</td></tr><tr><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td><td>caltech_crp</td><td>Caltech CRP</td><td><a href="papers/060820f110a72cbf02c14a6d1085bd6e1d994f6a.html" target="_blank">Fine-grained classification of pedestrians in video: Benchmark and state of the art</a></td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>17</td><td>8</td><td>9</td><td>0</td><td>9</td><td>8</td></tr><tr><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2b926b3586399d028b46315d7d9fb9d879e4f79c.html" target="_blank">Multimodal 2D, 2.5D & 3D Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2006 International Conference on Image Processing</td><td>edu</td><td>Universidad Rey Juan Carlos, Spain</td><td>Spain</td><td>40.33586610</td><td>-3.87694320</td><td>57%</td><td>14</td><td>8</td><td>6</td><td>0</td><td>2</td><td>12</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html" target="_blank">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>16</td><td>8</td><td>8</td><td>1</td><td>10</td><td>6</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html" target="_blank">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><span class="gray">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>Canada</td><td>45.50397610</td><td>-73.57496870</td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>13</td><td>7</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html" target="_blank">How to Take a Good Selfie?</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>11</td><td>8</td><td>3</td><td>0</td><td>7</td><td>5</td></tr><tr><td>633c851ebf625ad7abdda2324e9de093cf623141</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/633c851ebf625ad7abdda2324e9de093cf623141.html" target="_blank">Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</a></td><td><span class="gray">[pdf]</a></td><td>2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>10</td><td>7</td><td>3</td><td>0</td><td>8</td><td>3</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html" target="_blank">Competitive affective gaming: winning with a smile</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>Portugal</td><td>38.66096400</td><td>-9.20581300</td><td>78%</td><td>9</td><td>7</td><td>2</td><td>0</td><td>5</td><td>4</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>ufi</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html" target="_blank">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>12</td><td>6</td><td>6</td><td>0</td><td>4</td><td>6</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html" target="_blank">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>11</td><td>6</td><td>5</td><td>0</td><td>5</td><td>4</td></tr><tr><td>4af89578ac237278be310f7660a408b03f12d603</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/4af89578ac237278be310f7660a408b03f12d603.html" target="_blank">Large-scale geo-facial image analysis</a></td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" 
target="_blank">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>6</td><td>6</td><td>0</td><td>0</td><td>4</td><td>2</td></tr><tr><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/2d45cfd838016a6e39f6b766ffe85acd649440c7.html" target="_blank">Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>8</td><td>6</td><td>2</td><td>0</td><td>5</td><td>3</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html" target="_blank">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>86%</td><td>7</td><td>6</td><td>1</td><td>1</td><td>5</td><td>2</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html" target="_blank">USED: a large-scale social event detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Trento</td><td>Italy</td><td>46.06588360</td><td>11.11598940</td><td>86%</td><td>7</td><td>6</td><td>1</td><td>0</td><td>3</td><td>4</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html" target="_blank">Large age-gap face verification by feature injection in deep networks</a></td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>7</td><td>5</td><td>2</td><td>0</td><td>3</td><td>4</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html" target="_blank">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>14</td><td>5</td><td>9</td><td>0</td><td>12</td><td>1</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html" target="_blank">Large scale unconstrained open set face database</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>company</td><td>Securics Inc., Colorado Springs, CO</td><td>United States</td><td>38.83388160</td><td>-104.82136340</td><td>83%</td><td>6</td><td>5</td><td>1</td><td>0</td><td>4</td><td>2</td></tr><tr><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td><td>uccs</td><td>UCCS</td><td><a href="papers/d4f1eb008eb80595bcfdac368e23ae9754e1e745.html" target="_blank">Unconstrained Face Detection and Open-Set Face Recognition Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE 
International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>4</td><td>1</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html" target="_blank">Re-identification of pedestrians with variable occlusion and scale</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>Kingston University</td><td>United Kingdom</td><td>51.42930860</td><td>-0.26840440</td><td>56%</td><td>9</td><td>5</td><td>4</td><td>1</td><td>5</td><td>4</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html" target="_blank">Faces in Places: compound query retrieval</a></td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>3</td><td>2</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html" target="_blank">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>6</td><td>3</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html" target="_blank">A Multi-modal Graphical Model for Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>0</td><td>5</td><td>3</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>erce</td><td>ERCe</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html" target="_blank">Visual Kinship Recognition of Families in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>University of Massachusetts Dartmouth</td><td>United States</td><td>41.62772475</td><td>-71.00724501</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>2</td><td>3</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html" target="_blank">Is the eye region more reliable than the face? 
A preliminary study of face-based recognition on a transgender dataset</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>1</td><td>3</td><td>5</td></tr><tr><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td><td>mafa</td><td>MAsked FAces</td><td><a href="papers/9cc8cf0c7d7fa7607659921b6ff657e17e135ecc.html" target="_blank">Detecting Masked Faces in the Wild with LLE-CNNs</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>1</td></tr><tr><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td><td>megaage</td><td>MegaAge</td><td><a href="papers/c72a2ea819df9b0e8cd267eebcc6528b8741e03d.html" target="_blank">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>4</td><td>4</td><td>0</td><td>0</td><td>4</td><td>0</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html" target="_blank">Spoofing faces using makeup: An investigative study</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>France</td><td>43.61581310</td><td>7.06838000</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>0</td><td>1</td><td>5</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html" target="_blank">PETS 2017: Dataset and Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>1</td><td>8</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html" target="_blank">ReSEED: social event dEtection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>1</td><td>5</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td><td>imdb_face</td><td>IMDb Face</td><td><a href="papers/9e31e77f9543ab42474ba4e9330676e18c242e72.html" target="_blank">The Devil of Face Recognition is in the Noise</a></td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Nanyang Technological 
University</td><td>Singapore</td><td>1.34841040</td><td>103.68297965</td><td>50%</td><td>6</td><td>3</td><td>3</td><td>0</td><td>4</td><td>1</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td><td>mr2</td><td>MR2</td><td><a href="papers/578d4ad74818086bb64f182f72e2c8bd31e3d426.html" target="_blank">The MR2: A multi-racial, mega-resolution database of facial stimuli.</a></td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>7</td><td>0</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html" target="_blank">Investigating Open-World Person Re-identification Using a Drone</a></td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>5</td><td>2</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>3531332efe19be21e7401ba1f04570a142617236</td><td>ufdd</td><td>UFDD</td><td><a href="papers/3531332efe19be21e7401ba1f04570a142617236.html" target="_blank">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>4</td><td>3</td><td>1</td><td>0</td><td>4</td><td>0</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html" target="_blank">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td><td>gfw</td><td>Grouping Face in the Wild</td><td><a href="papers/e58dd160a76349d46f881bd6ddbc2921f08d1050.html" target="_blank">Merge or Not? 
Learning to Group Faces via Imitation Learning</a></td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>2</td><td>0</td></tr><tr><td>4eab317b5ac436a949849ed286baa3de2a541eef</td><td>laofiw</td><td>LAOFIW</td><td><a href="papers/4eab317b5ac436a949849ed286baa3de2a541eef.html" target="_blank">Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</a></td><td><a href="https://arxiv.org/pdf/1809.02169.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html" target="_blank">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461.html" target="_blank">A 3D Dynamic Database for Unconstrained Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43.html" target="_blank">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html" target="_blank">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html" target="_blank">Indian Face Age Database: A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html" target="_blank">Crowdsourcing facial expressions for 
affective-interaction</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/2306b2a8fba28539306052764a77a0d0f5d1236a.html" target="_blank">Surveillance Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html" target="_blank">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td>WLFDB</td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html" target="_blank">WLFDB : Weakly Labeled Face Databases</a></td><td><a href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/7b92d1e53cc87f7a4256695de590098a2f30261e.html" target="_blank">From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html" target="_blank">Pedestrian detection: A benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/15e1af79939dbf90790b03d8aa02477783fb1d0f.html" target="_blank">Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</a></td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>72a155c987816ae81c858fddbd6beab656d86220</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/72a155c987816ae81c858fddbd6beab656d86220.html" target="_blank">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html" target="_blank">Face swapping: automatically replacing faces in photographs</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/12ad3b5bbbf407f8e54ea692c07633d1a867c566.html" target="_blank">Object recognition using segmentation for feature detection</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 17th International Conference on Pattern Recognition, 2004. ICPR 2004.</td><td>edu</td><td>Inst. of Comput. Sci., Univ. of Leoben, Austria</td><td>Austria</td><td>47.38473720</td><td>15.09302010</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html" target="_blank">HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</a></td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td><td>ibm_dif</td><td>IBM Diversity in Faces</td><td><a href="papers/0ab7cff2ccda7269b73ff6efd9d37e1318f7db25.html" target="_blank">Facial Coding Scheme Reference 1 Craniofacial Distances</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html" target="_blank">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html" target="_blank">Understanding images of groups of people</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfpw</td><td>LFPW</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html" target="_blank">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine 
Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html" target="_blank">Component-Based Face Recognition with 3D Morphable Models</a></td><td><span class="gray">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html" target="_blank">Names and faces in the news</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td><td>scut_head</td><td>SCUT HEAD</td><td><a href="papers/d3200d49a19a4a4e4e9745ee39649b65d80c834b.html" target="_blank">Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</a></td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td>2018 24th International Conference on Pattern Recognition (ICPR)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>8990cdce3f917dad622e43e033db686b354d057c</td><td>tiny_faces</td><td>TinyFace</td><td><a href="papers/8990cdce3f917dad622e43e033db686b354d057c.html" target="_blank">Low-Resolution Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html" target="_blank">VQA: Visual Question Answering</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a 
href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html" target="_blank">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Kentucky</td><td>United States</td><td>38.03337420</td><td>-84.50177580</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/36bccfb2ad847096bc76777e544f305813cd8f5b.html" target="_blank">WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr></table></body></html>
\ No newline at end of file diff --git a/scraper/reports/report_index.html b/scraper/reports/report_index.html index 6c04165f..4fc433e1 100644 --- a/scraper/reports/report_index.html +++ b/scraper/reports/report_index.html @@ -1 +1 @@ -<!doctype html><html><head><meta charset='utf-8'><title>All Papers</title><link rel='stylesheet' href='reports.css'></head><body><h2>All Papers</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Country</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html" target="_blank">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>387</td><td>254</td><td>133</td><td>19</td><td>291</td><td>96</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html" target="_blank">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td>edu</td><td>Imperial College London</td><td>United Kingdom</td><td>51.49887085</td><td>-0.17560797</td><td>71%</td><td>129</td><td>92</td><td>37</td><td>6</td><td>74</td><td>55</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html" target="_blank">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>79%</td><td>323</td><td>255</td><td>68</td><td>15</td><td>208</td><td>120</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html" target="_blank">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>133</td><td>82</td><td>51</td><td>9</td><td>73</td><td>58</td></tr><tr><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43.html" target="_blank">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html" target="_blank">80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on 
Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>999</td><td>574</td><td>425</td><td>89</td><td>644</td><td>337</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html" target="_blank">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>2</td><td>26</td><td>15</td></tr><tr><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461.html" target="_blank">A 3D Dynamic Database for Unconstrained Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html" target="_blank">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>343</td><td>194</td><td>149</td><td>23</td><td>223</td><td>114</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html" target="_blank">A 3D facial expression database for facial behavior research</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>588</td><td>316</td><td>271</td><td>44</td><td>306</td><td>282</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html" target="_blank">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><span class="gray">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>65</td><td>38</td><td>27</td><td>6</td><td>45</td><td>20</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html" target="_blank">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>50</td><td>26</td><td>24</td><td>5</td><td>31</td><td>18</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html" target="_blank">A Multi-modal Graphical Model for Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer 
Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>0</td><td>5</td><td>3</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html" target="_blank">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>26</td><td>18</td><td>8</td><td>0</td><td>16</td><td>10</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html" target="_blank">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>184</td><td>148</td><td>36</td><td>8</td><td>120</td><td>67</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html" target="_blank">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>95</td><td>63</td><td>32</td><td>6</td><td>50</td><td>45</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html" target="_blank">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>20</td><td>9</td><td>11</td><td>0</td><td>9</td><td>11</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html" target="_blank">A data-driven approach to cleaning large face datasets</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>138</td><td>89</td><td>49</td><td>1</td><td>95</td><td>41</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html" target="_blank">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>United States</td><td>42.08779975</td><td>-75.97066066</td><td>52%</td><td>154</td><td>80</td><td>74</td><td>6</td><td>80</td><td>75</td></tr><tr><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td><td>fei</td><td>FEI</td><td><a href="papers/8b56e33f33e582f3e473dba573a16b598ed9bcdc.html" target="_blank">A new ranking method for principal components analysis and its application to face image analysis</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision 
Comput.</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>169</td><td>93</td><td>76</td><td>6</td><td>69</td><td>102</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html" target="_blank">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>18</td><td>9</td><td>9</td><td>0</td><td>12</td><td>5</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html" target="_blank">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>554</td><td>445</td><td>94</td><td>495</td><td>491</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html" target="_blank">Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>47</td><td>24</td><td>23</td><td>1</td><td>23</td><td>24</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/758d7e1be64cc668c59ef33ba8882c8597406e53.html" target="_blank">AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</a></td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>37</td><td>23</td><td>14</td><td>0</td><td>25</td><td>11</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html" target="_blank">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>83</td><td>38</td><td>45</td><td>6</td><td>43</td><td>39</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html" target="_blank">Age and Gender Estimation of Unfiltered Faces</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>87%</td><td>179</td><td>156</td><td>23</td><td>1</td><td>98</td><td>80</td></tr><tr><td>d818568838433a6d6831adde49a58cef05e0c89f</td><td>agedb</td><td>AgeDB</td><td><a href="papers/d818568838433a6d6831adde49a58cef05e0c89f.html" target="_blank">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops 
(CVPRW)</td><td>edu</td><td>Imperial College London</td><td>United Kingdom</td><td>51.49887085</td><td>-0.17560797</td><td>94%</td><td>18</td><td>17</td><td>1</td><td>0</td><td>14</td><td>3</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html" target="_blank">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>69%</td><td>318</td><td>218</td><td>100</td><td>27</td><td>211</td><td>107</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html" target="_blank">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>91</td><td>57</td><td>34</td><td>5</td><td>60</td><td>31</td></tr><tr><td>633c851ebf625ad7abdda2324e9de093cf623141</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/633c851ebf625ad7abdda2324e9de093cf623141.html" target="_blank">Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</a></td><td><span class="gray">[pdf]</a></td><td>2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>10</td><td>7</td><td>3</td><td>0</td><td>8</td><td>3</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html" target="_blank">Appearance-based gaze estimation in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>149</td><td>109</td><td>40</td><td>3</td><td>94</td><td>54</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html" target="_blank">Attribute and simile classifiers for face verification</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>914</td><td>585</td><td>329</td><td>47</td><td>586</td><td>316</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html" target="_blank">Automated Human Identification Using Ear Imaging</a></td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>80</td><td>40</td><td>40</td><td>6</td><td>35</td><td>44</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html" target="_blank">Automatic 3D face authentication</a></td><td><a 
href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>100</td><td>54</td><td>46</td><td>8</td><td>63</td><td>36</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html" target="_blank">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>1</td><td>16</td><td>11</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html" target="_blank">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>91%</td><td>54</td><td>49</td><td>4</td><td>1</td><td>41</td><td>12</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html" target="_blank">Bosphorus Database for 3D Face Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>352</td><td>198</td><td>154</td><td>17</td><td>162</td><td>188</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United 
States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html" target="_blank">ChaLearn looking at people: A review of events and resources</a></td><td><span class="gray">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>13</td><td>9</td><td>4</td><td>1</td><td>8</td><td>4</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html" target="_blank">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>60</td><td>42</td><td>18</td><td>0</td><td>34</td><td>28</td></tr><tr><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/22ad2c8c0f4d6aa4328b38d894b814ec22579761.html" target="_blank">Clothing cosegmentation for recognizing people</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>65%</td><td>178</td><td>116</td><td>62</td><td>7</td><td>100</td><td>86</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>285</td><td>188</td><td>97</td><td>11</td><td>197</td><td>93</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>285</td><td>188</td><td>97</td><td>11</td><td>197</td><td>93</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html" target="_blank">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>899</td><td>508</td><td>391</td><td>51</td><td>431</td><td>451</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html" target="_blank">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><span class="gray">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>Australia</td><td>-35.27769990</td><td>149.11852700</td><td>64%</td><td>181</td><td>115</td><td>66</td><td>8</td><td>87</td><td>97</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html" target="_blank">Competitive affective gaming: winning with a smile</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>Portugal</td><td>38.66096400</td><td>-9.20581300</td><td>78%</td><td>9</td><td>7</td><td>2</td><td>0</td><td>5</td><td>4</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html" target="_blank">Component-Based Face Recognition with 3D Morphable Models</a></td><td><span class="gray">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html" target="_blank">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>553</td><td>446</td><td>69</td><td>540</td><td>439</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html" target="_blank">Consistent Re-identification in a Camera Network</a></td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>49</td><td>35</td><td>14</td><td>3</td><td>34</td><td>13</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>33</td><td>20</td><td>13</td><td>1</td><td>23</td><td>11</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>33</td><td>20</td><td>13</td><td>1</td><td>23</td><td>11</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html" target="_blank">Crowdsourcing facial expressions for affective-interaction</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB-Wiki</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html" target="_blank">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>79%</td><td>122</td><td>96</td><td>26</td><td>4</td><td>75</td><td>48</td></tr><tr><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td><td>disfa</td><td>DISFA</td><td><a href="papers/5a5f0287484f0d480fed1ce585dbf729586f0edc.html" target="_blank">DISFA: A Spontaneous Facial Action Intensity Database</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Affective Computing</td><td>edu</td><td>University of Denver</td><td>United States</td><td>39.67665410</td><td>-104.96220300</td><td>54%</td><td>184</td><td>100</td><td>84</td><td>17</td><td>96</td><td>89</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB-Wiki</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html" target="_blank">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>72%</td><td>145</td><td>105</td><td>40</td><td>9</td><td>93</td><td>51</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html" target="_blank">Deep Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>999</td><td>646</td><td>353</td><td>48</td><td>558</td><td>429</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html" target="_blank">Deep Learning Face Attributes in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>57%</td><td>919</td><td>526</td><td>392</td><td>61</td><td>694</td><td>201</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html" target="_blank">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern 
Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>176</td><td>112</td><td>64</td><td>2</td><td>113</td><td>62</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html" target="_blank">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>568</td><td>412</td><td>156</td><td>19</td><td>320</td><td>235</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html" target="_blank">Depth and Appearance for Mobile Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>63%</td><td>324</td><td>203</td><td>121</td><td>26</td><td>193</td><td>127</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html" target="_blank">Describing Common Human Visual Actions in Images</a></td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>25</td><td>17</td><td>8</td><td>0</td><td>23</td><td>2</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>bpad</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html" target="_blank">Describing people: A poselet-based approach to attribute classification</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>230</td><td>140</td><td>90</td><td>14</td><td>163</td><td>66</td></tr><tr><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td><td>scut_head</td><td>SCUT HEAD</td><td><a href="papers/d3200d49a19a4a4e4e9745ee39649b65d80c834b.html" target="_blank">Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</a></td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td>2018 24th International Conference on Pattern Recognition (ICPR)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td><td>mafa</td><td>MAsked FAces</td><td><a href="papers/9cc8cf0c7d7fa7607659921b6ff657e17e135ecc.html" target="_blank">Detecting Masked Faces in the Wild with LLE-CNNs</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>1</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html" target="_blank">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>6</td><td>3</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html" target="_blank">Ear Recognition: More Than a Survey</a></td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" target="_blank">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td></td><td>77%</td><td>26</td><td>20</td><td>6</td><td>0</td><td>10</td><td>16</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw</td><td>LFW</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html" target="_blank">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>183</td><td>109</td><td>74</td><td>13</td><td>103</td><td>77</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html" target="_blank">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>86</td><td>45</td><td>41</td><td>7</td><td>54</td><td>29</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html" target="_blank">End-to-End Deep Learning for Person Search</a></td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>46</td><td>32</td><td>14</td><td>0</td><td>27</td><td>16</td></tr><tr><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/1bd1645a629f1b612960ab9bba276afd4cf7c666.html" target="_blank">End-to-End People Detection in Crowded Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Stanford University</td><td>United States</td><td>37.43131385</td><td>-122.16936535</td><td>61%</td><td>67</td><td>41</td><td>25</td><td>3</td><td>42</td><td>23</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html" target="_blank">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of California, Santa Cruz</td><td>United States</td><td>36.99158470</td><td>-122.05827710</td><td>66%</td><td>624</td><td>414</td><td>210</td><td>33</td><td>342</td><td>276</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html" target="_blank">Evaluating Multiple Object Tracking Performance: The CLEAR MOT 
Metrics</a></td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>632</td><td>366</td><td>264</td><td>44</td><td>358</td><td>264</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html" target="_blank">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>14</td><td>5</td><td>9</td><td>0</td><td>12</td><td>1</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html" target="_blank">Exploring Models and Data for Image Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>206</td><td>126</td><td>80</td><td>11</td><td>162</td><td>39</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html" target="_blank">FDDB: A benchmark for face detection in unconstrained settings</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>380</td><td>247</td><td>133</td><td>16</td><td>202</td><td>164</td></tr><tr><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td><td>feret</td><td>FERET</td><td><a href="papers/31de9b3dd6106ce6eec9a35991b2b9083395fd0b.html" target="_blank">FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</a></td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>75</td><td>39</td><td>36</td><td>5</td><td>54</td><td>20</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html" target="_blank">Face detection, pose estimation, and landmark localization in the wild</a></td><td><span 
class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>999</td><td>709</td><td>290</td><td>35</td><td>576</td><td>422</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html" target="_blank">Face recognition in unconstrained videos with matched background similarity</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Tel Aviv University</td><td>Israel</td><td>32.11198890</td><td>34.80459702</td><td>66%</td><td>509</td><td>338</td><td>170</td><td>23</td><td>294</td><td>216</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html" target="_blank">Face swapping: automatically replacing faces in photographs</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6204776d31359d129a582057c2d788a14f8aadeb</td><td>youtube_celebrities</td><td>YouTube Celebrities</td><td><a href="papers/6204776d31359d129a582057c2d788a14f8aadeb.html" target="_blank">Face tracking and recognition with visual constraints in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Rutgers University</td><td>United States</td><td>40.47913175</td><td>-74.43168868</td><td>57%</td><td>267</td><td>151</td><td>115</td><td>11</td><td>125</td><td>121</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html" target="_blank">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>225</td><td>144</td><td>81</td><td>17</td><td>146</td><td>77</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html" target="_blank">Faces in Places: compound query retrieval</a></td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>3</td><td>2</td></tr><tr><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td><td>ibm_dif</td><td>IBM Diversity in Faces</td><td><a href="papers/0ab7cff2ccda7269b73ff6efd9d37e1318f7db25.html" target="_blank">Facial Coding Scheme Reference 1 Craniofacial Distances</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>407</td><td>283</td><td>124</td><td>16</td><td>252</td><td>153</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>407</td><td>283</td><td>124</td><td>16</td><td>252</td><td>153</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html" target="_blank">Fashion Landmark Detection in the Wild</a></td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>26</td><td>19</td><td>7</td><td>1</td><td>16</td><td>10</td></tr><tr><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td><td>caltech_crp</td><td>Caltech CRP</td><td><a href="papers/060820f110a72cbf02c14a6d1085bd6e1d994f6a.html" target="_blank">Fine-grained classification of pedestrians in video: Benchmark and state of the art</a></td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>17</td><td>8</td><td>9</td><td>0</td><td>9</td><td>8</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html" target="_blank">Fine-grained evaluation on face detection in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>12</td><td>5</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html" target="_blank">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>148</td><td>99</td><td>49</td><td>7</td><td>105</td><td>43</td></tr><tr><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/7b92d1e53cc87f7a4256695de590098a2f30261e.html" target="_blank">From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html" target="_blank">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>1</td><td>12</td><td>6</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html" target="_blank">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>11</td><td>6</td><td>5</td><td>0</td><td>5</td><td>4</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html" target="_blank">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>999</td><td>561</td><td>438</td><td>66</td><td>498</td><td>462</td></tr><tr><td>06f02199690961ba52997cde1527e714d2b3bf8f</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/06f02199690961ba52997cde1527e714d2b3bf8f.html" target="_blank">Gaze locking: passive eye contact detection for human-object interaction</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>76%</td><td>79</td><td>60</td><td>19</td><td>0</td><td>49</td><td>34</td></tr><tr><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/18858cc936947fc96b5c06bbe3c6c2faa5614540.html" target="_blank">Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</a></td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>59</td><td>31</td><td>28</td><td>0</td><td>47</td><td>10</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html" target="_blank">Genealogical face recognition based on UB KinFace database</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>United States</td><td>42.93362780</td><td>-78.88394479</td><td>55%</td><td>31</td><td>17</td><td>14</td><td>0</td><td>11</td><td>21</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html" target="_blank">Generic object recognition with boosting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>53%</td><td>293</td><td>155</td><td>138</td><td>16</td><td>195</td><td>97</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html" target="_blank">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html" target="_blank">HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</a></td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/a8d0b149c2eadaa02204d3e4356fbc8eccf3b315.html" target="_blank">Hi4D-ADSIP 3-D dynamic facial articulation database</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>1</td><td>4</td><td>11</td></tr><tr><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/2d45cfd838016a6e39f6b766ffe85acd649440c7.html" target="_blank">Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>8</td><td>6</td><td>2</td><td>0</td><td>5</td><td>3</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html" target="_blank">High Five: Recognising human interactions in TV shows</a></td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>98</td><td>56</td><td>42</td><td>10</td><td>66</td><td>28</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html" target="_blank">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>95</td><td>61</td><td>34</td><td>4</td><td>59</td><td>35</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html" target="_blank">Histograms of oriented gradients for human detection</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>INRIA Rhone-Alps, Montbonnot, France</td><td>France</td><td>45.21788600</td><td>5.80736900</td><td>57%</td><td>999</td><td>573</td><td>426</td><td>41</td><td>419</td><td>509</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html" target="_blank">How to Take a Good Selfie?</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>11</td><td>8</td><td>3</td><td>0</td><td>7</td><td>5</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html" target="_blank">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>18</td><td>13</td><td>5</td><td>0</td><td>14</td><td>4</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html" target="_blank">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>280</td><td>194</td><td>86</td><td>9</td><td>139</td><td>137</td></tr><tr><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td><td>pa_100k</td><td>PA-100K</td><td><a href="papers/f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44.html" target="_blank">HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>55</td><td>41</td><td>14</td><td>0</td><td>36</td><td>17</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html" target="_blank">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><span class="gray">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td></td><td>79%</td><td>14</td><td>11</td><td>3</td><td>0</td><td>12</td><td>1</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html" target="_blank">IARPA Janus Benchmark-B Face Dataset</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Michigan State University</td><td>United States</td><td>42.71856800</td><td>-84.47791571</td><td>63%</td><td>35</td><td>22</td><td>13</td><td>3</td><td>25</td><td>8</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html" target="_blank">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>35</td><td>20</td><td>15</td><td>2</td><td>21</td><td>14</td></tr><tr><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/7f23a4bb0c777dd72cca7665a5f370ac7980217e.html" target="_blank">Improving Person Re-identification by Attribute and Identity Learning</a></td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>84%</td><td>87</td><td>73</td><td>14</td><td>0</td><td>43</td><td>42</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html" target="_blank">Indian Face Age Database: A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html" target="_blank">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td>edu</td><td>BVBCET, Hubli, India</td><td>India</td><td>15.36883320</td><td>75.12137960</td><td>65%</td><td>17</td><td>11</td><td>6</td><td>0</td><td>11</td><td>5</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html" target="_blank">Interactive Facial Feature Localization</a></td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Adobe</td><td>United States</td><td>37.33077030</td><td>-121.89409510</td><td>85%</td><td>352</td><td>298</td><td>54</td><td>8</td><td>212</td><td>146</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html" target="_blank">Investigating Open-World Person Re-identification Using a Drone</a></td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>5</td><td>2</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html" target="_blank">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>United States</td><td>34.22498270</td><td>-77.86907744</td><td>77%</td><td>13</td><td>10</td><td>3</td><td>0</td><td>6</td><td>8</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html" target="_blank">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html" target="_blank">Iranian 
Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</a></td><td>2007 International Conference on Machine Vision</td><td>edu</td><td>Islamic Azad University</td><td>Iran</td><td>34.84529990</td><td>48.55962120</td><td>48%</td><td>23</td><td>11</td><td>12</td><td>2</td><td>14</td><td>9</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html" target="_blank">Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>1</td><td>3</td><td>5</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html" target="_blank">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td>edu</td><td>University of North Carolina at Chapel Hill</td><td>United States</td><td>35.91139710</td><td>-79.05045290</td><td>61%</td><td>82</td><td>50</td><td>32</td><td>6</td><td>28</td><td>52</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html" target="_blank">Kinship Verification through Transfer Learning</a></td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>71</td><td>41</td><td>30</td><td>2</td><td>29</td><td>42</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html" target="_blank">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Massachusetts</td><td>United States</td><td>42.38897850</td><td>-72.52869870</td><td>69%</td><td>123</td><td>85</td><td>38</td><td>3</td><td>71</td><td>51</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html" target="_blank">Labeled Faces in the Wild: A Database forStudying Face Recognition in Unconstrained Environments</a></td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>999</td><td>632</td><td>367</td><td>59</td><td>598</td><td>382</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html" target="_blank">Labeled Faces in the Wild: A Survey</a></td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Stevens Institute of Technology</td><td>United 
States</td><td>40.74225200</td><td>-74.02709490</td><td>64%</td><td>109</td><td>70</td><td>39</td><td>7</td><td>66</td><td>43</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html" target="_blank">Large age-gap face verification by feature injection in deep networks</a></td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>7</td><td>5</td><td>2</td><td>0</td><td>3</td><td>4</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html" target="_blank">Large scale unconstrained open set face database</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>company</td><td>Securics Inc., Colorado Springs, CO</td><td>United States</td><td>38.83388160</td><td>-104.82136340</td><td>83%</td><td>6</td><td>5</td><td>1</td><td>0</td><td>4</td><td>2</td></tr><tr><td>4af89578ac237278be310f7660a408b03f12d603</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/4af89578ac237278be310f7660a408b03f12d603.html" target="_blank">Large-scale geo-facial image analysis</a></td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>6</td><td>6</td><td>0</td><td>0</td><td>4</td><td>2</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>108</td><td>77</td><td>31</td><td>7</td><td>66</td><td>44</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>108</td><td>77</td><td>31</td><td>7</td><td>66</td><td>44</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html" target="_blank">Learning Face Representation from Scratch</a></td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese Academy of Sciences</td><td>China</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>476</td><td>339</td><td>137</td><td>19</td><td>290</td><td>182</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html" target="_blank">Learning Social Relation Traits from Face Images</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>23</td><td>14</td><td>9</td><td>4</td><td>16</td><td>7</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html" target="_blank">Learning effective human pose estimation from inaccurate annotation</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Leeds</td><td>United Kingdom</td><td>53.80387185</td><td>-1.55245712</td><td>71%</td><td>169</td><td>120</td><td>49</td><td>7</td><td>108</td><td>65</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>erce</td><td>ERCe</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td><td>mot</td><td>MOT</td><td><a href="papers/5981e6479c3fd4e31644db35d236bfb84ae46514.html" target="_blank">Learning to associate: HybridBoosted multi-target tracker for crowded scene</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of Southern California</td><td>United States</td><td>34.02241490</td><td>-118.28634407</td><td>61%</td><td>326</td><td>200</td><td>125</td><td>22</td><td>190</td><td>137</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>369</td><td>227</td><td>141</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>369</td><td>227</td><td>141</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>369</td><td>227</td><td>141</td><td>32</td><td>237</td><td>131</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html" target="_blank">Level Playing Field for Million Scale Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>United States</td><td>47.65432380</td><td>-122.30800894</td><td>72%</td><td>39</td><td>28</td><td>11</td><td>2</td><td>29</td><td>9</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html" target="_blank">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>197</td><td>132</td><td>65</td><td>15</td><td>108</td><td>88</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfpw</td><td>LFPW</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html" target="_blank">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html" target="_blank">Locally Aligned Feature Transforms across Views</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>258</td><td>164</td><td>94</td><td>15</td><td>136</td><td>117</td></tr><tr><td>8990cdce3f917dad622e43e033db686b354d057c</td><td>tiny_faces</td><td>TinyFace</td><td><a href="papers/8990cdce3f917dad622e43e033db686b354d057c.html" target="_blank">Low-Resolution Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html" target="_blank">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>168</td><td>115</td><td>53</td><td>4</td><td>97</td><td>69</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>North Carolina University</td><td>United 
States</td><td>34.22398690</td><td>-77.87013250</td><td>59%</td><td>437</td><td>258</td><td>178</td><td>22</td><td>228</td><td>203</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>North Carolina University</td><td>United States</td><td>34.22398690</td><td>-77.87013250</td><td>59%</td><td>437</td><td>258</td><td>178</td><td>22</td><td>228</td><td>203</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html" target="_blank">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>78%</td><td>180</td><td>141</td><td>39</td><td>8</td><td>120</td><td>59</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html" target="_blank">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>67</td><td>36</td><td>31</td><td>4</td><td>29</td><td>28</td></tr><tr><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td><td>gfw</td><td>Grouping Face in the Wild</td><td><a href="papers/e58dd160a76349d46f881bd6ddbc2921f08d1050.html" target="_blank">Merge or Not? 
Learning to Group Faces via Imitation Learning</a></td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>2</td><td>0</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>fpoq</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html" target="_blank">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>81%</td><td>16</td><td>13</td><td>3</td><td>0</td><td>13</td><td>4</td></tr><tr><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td><td>coco</td><td>COCO</td><td><a href="papers/5e0f8c355a37a5a89351c02f174e7a5ddcb98683.html" target="_blank">Microsoft COCO: Common Objects in Context</a></td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>608</td><td>391</td><td>25</td><td>722</td><td>259</td></tr><tr><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/41976ebc8ab76d9a6861487c97cc7fcbe3b6015f.html" target="_blank">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>76%</td><td>29</td><td>22</td><td>7</td><td>2</td><td>27</td><td>2</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>60%</td><td>311</td><td>186</td><td>125</td><td>33</td><td>208</td><td>105</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>60%</td><td>311</td><td>186</td><td>125</td><td>33</td><td>208</td><td>105</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html" target="_blank">Multi-camera activity correlation analysis</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>69%</td><td>142</td><td>98</td><td>44</td><td>7</td><td>77</td><td>64</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard 
pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html" target="_blank">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>148</td><td>113</td><td>35</td><td>3</td><td>80</td><td>65</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html" target="_blank">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>26</td><td>16</td><td>10</td><td>2</td><td>21</td><td>6</td></tr><tr><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2b926b3586399d028b46315d7d9fb9d879e4f79c.html" target="_blank">Multimodal 2D, 2.5D & 3D Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2006 International Conference on Image Processing</td><td>edu</td><td>Universidad Rey Juan Carlos, Spain</td><td>Spain</td><td>40.33586610</td><td>-3.87694320</td><td>57%</td><td>14</td><td>8</td><td>6</td><td>0</td><td>2</td><td>12</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html" target="_blank">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>0</td><td>17</td><td>26</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html" target="_blank">Names and faces in the news</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html" target="_blank">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>22</td><td>11</td><td>11</td><td>3</td><td>11</td><td>10</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html" target="_blank">Object Detection Combining Recognition and Segmentation</a></td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>105</td><td>64</td><td>41</td><td>9</td><td>58</td><td>43</td></tr><tr><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/12ad3b5bbbf407f8e54ea692c07633d1a867c566.html" target="_blank">Object recognition using segmentation for feature detection</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 17th International Conference on Pattern Recognition, 2004. ICPR 2004.</td><td>edu</td><td>Inst. of Comput. Sci., Univ. of Leoben, Austria</td><td>Austria</td><td>47.38473720</td><td>15.09302010</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html" target="_blank">On a Large Sequence-Based Human Gait Database</a></td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>150</td><td>95</td><td>55</td><td>17</td><td>103</td><td>51</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html" target="_blank">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>78</td><td>43</td><td>35</td><td>8</td><td>44</td><td>31</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html" target="_blank">Overview of the face recognition grand challenge</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>NIST</td><td>United States</td><td>39.14004000</td><td>-77.21850600</td><td>57%</td><td>999</td><td>568</td><td>430</td><td>85</td><td>549</td><td>442</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html" target="_blank">PETS 2017: Dataset and Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>1</td><td>8</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html" target="_blank">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><span class="gray">[pdf]</a></td><td>Face and Gesture 2011</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>54%</td><td>189</td><td>103</td><td>86</td><td>21</td><td>108</td><td>78</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html" target="_blank">Parameterisation of a stochastic model for human face identification</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>999</td><td>501</td><td>498</td><td>94</td><td>543</td><td>427</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html" target="_blank">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>138</td><td>83</td><td>55</td><td>6</td><td>76</td><td>63</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision 
Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html" target="_blank">Pedestrian Attribute Recognition At Far Distance</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>88</td><td>66</td><td>22</td><td>1</td><td>50</td><td>36</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html" target="_blank">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>60%</td><td>999</td><td>599</td><td>400</td><td>69</td><td>527</td><td>466</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html" target="_blank">Pedestrian detection: A benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United 
States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html" target="_blank">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>386</td><td>263</td><td>123</td><td>23</td><td>204</td><td>180</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html" target="_blank">Person Re-identification in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>77</td><td>52</td><td>25</td><td>1</td><td>47</td><td>27</td></tr><tr><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/a0cc5f73a37723a6dd465924143f1cb4976d0169.html" target="_blank">Person Transfer GAN to Bridge Domain Gap for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>92%</td><td>24</td><td>22</td><td>2</td><td>1</td><td>20</td><td>4</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html" target="_blank">Personalizing Human Video Pose Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Oxford University</td><td>United 
Kingdom</td><td>51.75208490</td><td>-1.25166460</td><td>64%</td><td>36</td><td>23</td><td>13</td><td>2</td><td>30</td><td>8</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html" target="_blank">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>716</td><td>415</td><td>301</td><td>59</td><td>492</td><td>222</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html" target="_blank">Presentation and validation of the Radboud Faces Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>487</td><td>234</td><td>253</td><td>39</td><td>342</td><td>144</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html" target="_blank">Pruning training sets for learning of object categories</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>63</td><td>44</td><td>19</td><td>4</td><td>42</td><td>20</td></tr><tr><td>3531332efe19be21e7401ba1f04570a142617236</td><td>ufdd</td><td>UFDD</td><td><a href="papers/3531332efe19be21e7401ba1f04570a142617236.html" target="_blank">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>4</td><td>3</td><td>1</td><td>0</td><td>4</td><td>0</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html" target="_blank">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>237</td><td>156</td><td>81</td><td>14</td><td>159</td><td>76</td></tr><tr><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td><td>megaage</td><td>MegaAge</td><td><a href="papers/c72a2ea819df9b0e8cd267eebcc6528b8741e03d.html" target="_blank">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>4</td><td>4</td><td>0</td><td>0</td><td>4</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html" target="_blank">Re-identification of pedestrians with variable occlusion and scale</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>Kingston University</td><td>United 
Kingdom</td><td>51.42930860</td><td>-0.26840440</td><td>56%</td><td>9</td><td>5</td><td>4</td><td>1</td><td>5</td><td>4</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html" target="_blank">Re-identify people in wide area camera network</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Udine</td><td>Italy</td><td>46.08107230</td><td>13.21194740</td><td>60%</td><td>60</td><td>36</td><td>24</td><td>1</td><td>38</td><td>21</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html" target="_blank">ReSEED: social event dEtection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>1</td><td>5</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html" target="_blank">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html" target="_blank">Recognition using visual phrases</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United States</td><td>40.11116745</td><td>-88.22587665</td><td>58%</td><td>246</td><td>143</td><td>103</td><td>17</td><td>170</td><td>68</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html" target="_blank">Recognize complex events from static images by fusing deep channels</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>44</td><td>31</td><td>13</td><td>1</td><td>29</td><td>15</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html" target="_blank">Recognizing disguised faces</a></td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>18</td><td>10</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html" target="_blank">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>511</td><td>289</td><td>222</td><td>49</td><td>329</td><td>182</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html" target="_blank">Robust Face Landmark Estimation under Occlusion</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>72%</td><td>325</td><td>233</td><td>92</td><td>12</td><td>194</td><td>133</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html" target="_blank">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><span class="gray">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>Canada</td><td>45.50397610</td><td>-73.57496870</td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>13</td><td>7</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html" target="_blank">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>2</td><td>21</td><td>12</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html" target="_blank">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>19</td><td>9</td><td>10</td><td>2</td><td>6</td><td>13</td></tr><tr><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td><td>scface</td><td>SCface</td><td><a href="papers/29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d.html" target="_blank">SCface – surveillance cameras face database</a></td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td>Multimedia Tools and Applications</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>179</td><td>101</td><td>78</td><td>15</td><td>88</td><td>89</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html" target="_blank">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html" target="_blank">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td>edu</td><td>Brown University</td><td>United States</td><td>41.82686820</td><td>-71.40123146</td><td>60%</td><td>264</td><td>159</td><td>105</td><td>27</td><td>206</td><td>56</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html" target="_blank">Scalable Person Re-identification: A Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>77%</td><td>460</td><td>355</td><td>105</td><td>9</td><td>263</td><td>185</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html" target="_blank">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>52</td><td>34</td><td>18</td><td>1</td><td>46</td><td>6</td></tr><tr><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/570f37ed63142312e6ccdf00ecc376341ec72b9f.html" target="_blank">Social LSTM: Human Trajectory Prediction in Crowded Spaces</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>224</td><td>125</td><td>99</td><td>3</td><td>140</td><td>81</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html" target="_blank">Spoofing faces using makeup: An investigative study</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>France</td><td>43.61581310</td><td>7.06838000</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>0</td><td>1</td><td>5</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html" target="_blank">Sports Videos in the Wild (SVW): A video dataset for 
sports analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>86%</td><td>7</td><td>6</td><td>1</td><td>1</td><td>5</td><td>2</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>oxford_town_centre</td><td>TownCentre</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html" target="_blank">Stable multi-target tracking in real-time surveillance video</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Oxford</td><td>United Kingdom</td><td>51.75345380</td><td>-1.25400997</td><td>68%</td><td>328</td><td>222</td><td>106</td><td>13</td><td>186</td><td>140</td></tr><tr><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/2306b2a8fba28539306052764a77a0d0f5d1236a.html" target="_blank">Surveillance Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html" target="_blank">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html" target="_blank">Texas 3D Face Recognition Database</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>66</td><td>40</td><td>26</td><td>3</td><td>40</td><td>27</td></tr><tr><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/6d96f946aaabc734af7fe3fc4454cf8547fcd5ed.html" target="_blank">The AR face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>999</td><td>579</td><td>420</td><td>58</td><td>458</td><td>530</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html" target="_blank">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>429</td><td>254</td><td>175</td><td>38</td><td>198</td><td>234</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html" target="_blank">The CMU Face In Action (FIA) Database</a></td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>54</td><td>26</td><td>28</td><td>5</td><td>40</td><td>16</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>449</td><td>310</td><td>49</td><td>404</td><td>345</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>449</td><td>310</td><td>49</td><td>404</td><td>345</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html" target="_blank">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>99</td><td>59</td><td>40</td><td>1</td><td>73</td><td>21</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>#N/A</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html" target="_blank">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>37</td><td>20</td><td>17</td><td>3</td><td>30</td><td>7</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html" target="_blank">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>2</td><td>18</td><td>3</td></tr><tr><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td><td>imdb_face</td><td>IMDb Face</td><td><a href="papers/9e31e77f9543ab42474ba4e9330676e18c242e72.html" target="_blank">The Devil of Face Recognition is in the Noise</a></td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Nanyang Technological University</td><td>Singapore</td><td>1.34841040</td><td>103.68297965</td><td>50%</td><td>6</td><td>3</td><td>3</td><td>0</td><td>4</td><td>1</td></tr><tr><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td><td>umd_faces</td><td>UMD</td><td><a href="papers/71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6.html" target="_blank">The Do’s and Don’ts for CNN-Based Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision 
Workshops (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>26</td><td>16</td><td>10</td><td>2</td><td>16</td><td>8</td></tr><tr><td>72a155c987816ae81c858fddbd6beab656d86220</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/72a155c987816ae81c858fddbd6beab656d86220.html" target="_blank">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html" target="_blank">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td>edu</td><td>University of Pittsburgh</td><td>United States</td><td>40.44415295</td><td>-79.96243993</td><td>61%</td><td>999</td><td>607</td><td>392</td><td>57</td><td>470</td><td>518</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html" target="_blank">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>3</td><td>18</td><td>9</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html" target="_blank">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>City University of New York</td><td>United States</td><td>40.87228250</td><td>-73.89489171</td><td>51%</td><td>115</td><td>59</td><td>56</td><td>8</td><td>75</td><td>37</td></tr><tr><td>dc8b25e35a3acb812beb499844734081722319b4</td><td>feret</td><td>FERET</td><td><a href="papers/dc8b25e35a3acb812beb499844734081722319b4.html" target="_blank">The FERET database and evaluation procedure for face-recognition algorithms</a></td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>521</td><td>478</td><td>103</td><td>591</td><td>421</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html" target="_blank">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>16</td><td>8</td><td>8</td><td>1</td><td>10</td><td>6</td></tr><tr><td>8be57cdad86fdf8c8290df4ca3149592f3c46dd3</td><td>m2vts</td><td>m2vts</td><td><a href="papers/8be57cdad86fdf8c8290df4ca3149592f3c46dd3.html" target="_blank">The 
M2VTS Multimodal Face Database (Release 1.00)</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>73</td><td>33</td><td>40</td><td>2</td><td>39</td><td>33</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td><td>mr2</td><td>MR2</td><td><a href="papers/578d4ad74818086bb64f182f72e2c8bd31e3d426.html" target="_blank">The MR2: A multi-racial, mega-resolution database of facial stimuli.</a></td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>7</td><td>0</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html" target="_blank">The MUG facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>Greece</td><td>40.62984145</td><td>22.95889350</td><td>55%</td><td>82</td><td>45</td><td>37</td><td>4</td><td>34</td><td>47</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html" target="_blank">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>61</td><td>37</td><td>24</td><td>0</td><td>43</td><td>16</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html" target="_blank">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>139</td><td>106</td><td>33</td><td>5</td><td>100</td><td>37</td></tr><tr><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td><td>voc</td><td>VOC</td><td><a href="papers/0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a.html" target="_blank">The Pascal Visual 
Object Classes (VOC) Challenge</a></td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>61%</td><td>999</td><td>607</td><td>391</td><td>28</td><td>557</td><td>422</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html" target="_blank">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>116</td><td>70</td><td>46</td><td>14</td><td>84</td><td>31</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html" target="_blank">The intrinsic memorability of face photographs.</a></td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td>Journal of experimental psychology. General</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>52</td><td>29</td><td>23</td><td>2</td><td>36</td><td>14</td></tr><tr><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/d178cde92ab3dc0dd2ebee5a76a33d556c39448b.html" target="_blank">The jiku mobile video dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>National University of Singapore</td><td>Singapore</td><td>1.29620180</td><td>103.77689944</td><td>71%</td><td>24</td><td>17</td><td>7</td><td>0</td><td>6</td><td>19</td></tr><tr><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td><td>put_face</td><td>Put Face</td><td><a href="papers/ae0aee03d946efffdc7af2362a42d3750e7dd48a.html" target="_blank">The put face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>99</td><td>54</td><td>45</td><td>5</td><td>55</td><td>48</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html" target="_blank">Three-dimensional face recognition: an eigensurface approach</a></td><td><span class="gray">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. 
ICIP '04.</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>38</td><td>16</td><td>22</td><td>4</td><td>24</td><td>13</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html" target="_blank">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>63%</td><td>84</td><td>53</td><td>31</td><td>4</td><td>51</td><td>33</td></tr><tr><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/64e0690dd176a93de9d4328f6e31fc4afe1e7536.html" target="_blank">Tracking Multiple People Online and in Real Time</a></td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>23</td><td>18</td><td>5</td><td>1</td><td>12</td><td>10</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html" target="_blank">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>0</td><td>18</td><td>16</td></tr><tr><td>4eab317b5ac436a949849ed286baa3de2a541eef</td><td>laofiw</td><td>LAOFIW</td><td><a href="papers/4eab317b5ac436a949849ed286baa3de2a541eef.html" target="_blank">Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</a></td><td><a href="https://arxiv.org/pdf/1809.02169.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>2</td><td>0</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html" target="_blank">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>999</td><td>643</td><td>356</td><td>56</td><td>628</td><td>362</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html" target="_blank">UMB-DB: A database of partially occluded 3D faces</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>47</td><td>31</td><td>16</td><td>2</td><td>22</td><td>24</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html" target="_blank">UMDFaces: An annotated face dataset for training deep networks</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td>edu</td><td>University of Maryland</td><td>United 
States</td><td>39.28996850</td><td>-76.62196103</td><td>79%</td><td>42</td><td>33</td><td>9</td><td>2</td><td>30</td><td>11</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html" target="_blank">USED: a large-scale social event detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Trento</td><td>Italy</td><td>46.06588360</td><td>11.11598940</td><td>86%</td><td>7</td><td>6</td><td>1</td><td>0</td><td>3</td><td>4</td></tr><tr><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td><td>uccs</td><td>UCCS</td><td><a href="papers/d4f1eb008eb80595bcfdac368e23ae9754e1e745.html" target="_blank">Unconstrained Face Detection and Open-Set Face Recognition Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>4</td><td>1</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>ufi</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html" target="_blank">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>12</td><td>6</td><td>6</td><td>0</td><td>4</td><td>6</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html" target="_blank">Understanding Kin Relationships in a Photo</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>94</td><td>59</td><td>35</td><td>1</td><td>33</td><td>61</td></tr><tr><td>5a4df9bef1872865f0b619ac3aacc97f49e4a035</td><td>cuhk_train_station</td><td>CUHK Train Station Dataset</td><td><a href="papers/5a4df9bef1872865f0b619ac3aacc97f49e4a035.html" target="_blank">Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>60%</td><td>141</td><td>84</td><td>57</td><td>4</td><td>60</td><td>75</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html" target="_blank">Understanding images of groups of people</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/15e1af79939dbf90790b03d8aa02477783fb1d0f.html" target="_blank">Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</a></td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html" target="_blank">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td>edu</td><td>University of Notre Dame</td><td>United States</td><td>41.70456775</td><td>-86.23822026</td><td>63%</td><td>35</td><td>22</td><td>13</td><td>3</td><td>18</td><td>15</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html" target="_blank">VADANA: A dense dataset for facial image analysis</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>University of Delaware</td><td>United States</td><td>39.68103280</td><td>-75.75401840</td><td>67%</td><td>15</td><td>10</td><td>5</td><td>0</td><td>5</td><td>10</td></tr><tr><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/70c59dc3470ae867016f6ab0e008ac8ba03774a1.html" target="_blank">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>83</td><td>66</td><td>17</td><td>3</td><td>61</td><td>20</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html" target="_blank">VQA: Visual Question Answering</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>erce</td><td>ERCe</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html" target="_blank">Violent flows: Real-time detection of violent crowd behavior</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition 
Workshops</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>65%</td><td>88</td><td>57</td><td>31</td><td>6</td><td>45</td><td>44</td></tr><tr><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td><td>kitti</td><td>KITTI</td><td><a href="papers/026e3363b7f76b51cc711886597a44d5f1fd1de2.html" target="_blank">Vision meets robotics: The KITTI dataset</a></td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td>I. J. Robotics Res.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>999</td><td>602</td><td>397</td><td>36</td><td>553</td><td>462</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html" target="_blank">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>168</td><td>91</td><td>77</td><td>10</td><td>85</td><td>79</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html" target="_blank">Visual Kinship Recognition of Families in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>University of Massachusetts Dartmouth</td><td>United States</td><td>41.62772475</td><td>-71.00724501</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>2</td><td>3</td></tr><tr><td>8875ae233bc074f5cd6c4ebba447b536a7e847a5</td><td>voxceleb2</td><td>VoxCeleb2</td><td><a href="papers/8875ae233bc074f5cd6c4ebba447b536a7e847a5.html" target="_blank">VoxCeleb2: Deep Speaker Recognition.</a></td><td><a href="https://pdfs.semanticscholar.org/8875/ae233bc074f5cd6c4ebba447b536a7e847a5.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>34</td><td>24</td><td>9</td><td>2</td><td>31</td><td>2</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html" target="_blank">WIDER FACE: A Face Detection Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>66%</td><td>178</td><td>118</td><td>60</td><td>11</td><td>112</td><td>66</td></tr><tr><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/36bccfb2ad847096bc76777e544f305813cd8f5b.html" target="_blank">WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td>WLFDB</td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html" target="_blank">WLFDB : Weakly Labeled Face Databases</a></td><td><a 
href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html" target="_blank">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>78</td><td>51</td><td>27</td><td>5</td><td>54</td><td>23</td></tr><tr><td>0c91808994a250d7be332400a534a9291ca3b60e</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/0c91808994a250d7be332400a534a9291ca3b60e.html" target="_blank">Weak Hypotheses and Boosting for Generic Object Detection and Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>236</td><td>131</td><td>105</td><td>17</td><td>161</td><td>77</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html" target="_blank">Web-based database for facial expression analysis</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>464</td><td>250</td><td>214</td><td>45</td><td>282</td><td>188</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html" target="_blank">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Kentucky</td><td>United States</td><td>38.03337420</td><td>-84.50177580</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/b62628ac06bbac998a3ab825324a41a11bc3a988.html" target="_blank">XM2VTSDB : The extended M2VTS database</a></td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>864</td><td>539</td><td>325</td><td>37</td><td>493</td><td>404</td></tr><tr><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/010f0f4929e6a6644fb01f0e43820f91d0fad292.html" target="_blank">YFCC100M: the new data in multimedia research</a></td><td><span class="gray">[pdf]</a></td><td>Commun. 
ACM</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>64%</td><td>274</td><td>176</td><td>98</td><td>23</td><td>172</td><td>100</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html" target="_blank">YawDD: a yawning detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>15</td><td>12</td><td>3</td><td>1</td><td>2</td><td>13</td></tr></table></body></html>
\ No newline at end of file
+<!doctype html><html><head><meta charset='utf-8'><title>All Papers</title><link rel='stylesheet' href='reports.css'></head><body><h2>All Papers</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Country</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html" target="_blank">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>387</td><td>254</td><td>133</td><td>19</td><td>291</td><td>96</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html" target="_blank">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td>edu</td><td>Imperial College London</td><td>United Kingdom</td><td>51.49887085</td><td>-0.17560797</td><td>71%</td><td>129</td><td>92</td><td>37</td><td>6</td><td>74</td><td>55</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html" target="_blank">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>79%</td><td>323</td><td>255</td><td>68</td><td>15</td><td>208</td><td>120</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html" target="_blank">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>133</td><td>82</td><td>51</td><td>9</td><td>73</td><td>58</td></tr><tr><td>a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/a40f9bfd3c45658ee8da70e1f2dfbe1f0c744d43.html" target="_blank">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="https://arxiv.org/pdf/1712.01443.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html" target="_blank">80 Million Tiny Images: A Large Data Set for Nonparametric Object and Scene Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine 
Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>999</td><td>574</td><td>425</td><td>89</td><td>644</td><td>337</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html" target="_blank">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>2</td><td>26</td><td>15</td></tr><tr><td>4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/4d4bb462c9f1d4e4ab1e4aa6a75cc0bc71b38461.html" target="_blank">A 3D Dynamic Database for Unconstrained Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/4d4b/b462c9f1d4e4ab1e4aa6a75cc0bc71b38461.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html" target="_blank">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>343</td><td>194</td><td>149</td><td>23</td><td>223</td><td>114</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html" target="_blank">A 3D facial expression database for facial behavior research</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>588</td><td>316</td><td>271</td><td>44</td><td>306</td><td>282</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html" target="_blank">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><span class="gray">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>65</td><td>38</td><td>27</td><td>6</td><td>45</td><td>20</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html" target="_blank">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>50</td><td>26</td><td>24</td><td>5</td><td>31</td><td>18</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html" target="_blank">A Multi-modal Graphical Model for Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer 
Vision</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>8</td><td>4</td><td>4</td><td>0</td><td>5</td><td>3</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html" target="_blank">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="https://arxiv.org/pdf/1603.07054.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>26</td><td>18</td><td>8</td><td>0</td><td>16</td><td>10</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html" target="_blank">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>184</td><td>148</td><td>36</td><td>8</td><td>120</td><td>67</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html" target="_blank">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>95</td><td>63</td><td>32</td><td>6</td><td>50</td><td>45</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html" target="_blank">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>20</td><td>9</td><td>11</td><td>0</td><td>9</td><td>11</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html" target="_blank">A data-driven approach to cleaning large face datasets</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>138</td><td>89</td><td>49</td><td>1</td><td>95</td><td>41</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html" target="_blank">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>United States</td><td>42.08779975</td><td>-75.97066066</td><td>52%</td><td>154</td><td>80</td><td>74</td><td>6</td><td>80</td><td>75</td></tr><tr><td>8b56e33f33e582f3e473dba573a16b598ed9bcdc</td><td>fei</td><td>FEI</td><td><a href="papers/8b56e33f33e582f3e473dba573a16b598ed9bcdc.html" target="_blank">A new ranking method for principal components analysis and its application to face image analysis</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision 
Comput.</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>169</td><td>93</td><td>76</td><td>6</td><td>69</td><td>102</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html" target="_blank">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/afew-va.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>18</td><td>9</td><td>9</td><td>0</td><td>12</td><td>5</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html" target="_blank">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>554</td><td>445</td><td>94</td><td>495</td><td>491</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html" target="_blank">Adaptive Image Sampling and Windows Classification for On-board Pedestrian Detection</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>51%</td><td>47</td><td>24</td><td>23</td><td>1</td><td>23</td><td>24</td></tr><tr><td>758d7e1be64cc668c59ef33ba8882c8597406e53</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/758d7e1be64cc668c59ef33ba8882c8597406e53.html" target="_blank">AffectNet: A Database for Facial Expression, Valence, and Arousal Computing in the Wild</a></td><td><a href="https://arxiv.org/pdf/1708.03985.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>37</td><td>23</td><td>14</td><td>0</td><td>25</td><td>11</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html" target="_blank">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>83</td><td>38</td><td>45</td><td>6</td><td>43</td><td>39</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html" target="_blank">Age and Gender Estimation of Unfiltered Faces</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>87%</td><td>179</td><td>156</td><td>23</td><td>1</td><td>98</td><td>80</td></tr><tr><td>d818568838433a6d6831adde49a58cef05e0c89f</td><td>agedb</td><td>AgeDB</td><td><a href="papers/d818568838433a6d6831adde49a58cef05e0c89f.html" target="_blank">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="http://eprints.mdx.ac.uk/22044/1/agedb_kotsia.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops 
(CVPRW)</td><td>edu</td><td>Imperial College London</td><td>United Kingdom</td><td>51.49887085</td><td>-0.17560797</td><td>94%</td><td>18</td><td>17</td><td>1</td><td>0</td><td>14</td><td>3</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html" target="_blank">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>69%</td><td>318</td><td>218</td><td>100</td><td>27</td><td>211</td><td>107</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html" target="_blank">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>91</td><td>57</td><td>34</td><td>5</td><td>60</td><td>31</td></tr><tr><td>633c851ebf625ad7abdda2324e9de093cf623141</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/633c851ebf625ad7abdda2324e9de093cf623141.html" target="_blank">Apparent and Real Age Estimation in Still Images with Deep Residual Regressors on Appa-Real Database</a></td><td><span class="gray">[pdf]</a></td><td>2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>10</td><td>7</td><td>3</td><td>0</td><td>8</td><td>3</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html" target="_blank">Appearance-based gaze estimation in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>149</td><td>109</td><td>40</td><td>3</td><td>94</td><td>54</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html" target="_blank">Attribute and simile classifiers for face verification</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>914</td><td>585</td><td>329</td><td>47</td><td>586</td><td>316</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html" target="_blank">Automated Human Identification Using Ear Imaging</a></td><td><a href="https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>80</td><td>40</td><td>40</td><td>6</td><td>35</td><td>44</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html" target="_blank">Automatic 3D face authentication</a></td><td><a 
href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.9190&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>100</td><td>54</td><td>46</td><td>8</td><td>63</td><td>36</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html" target="_blank">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="http://tomas.pfister.fi/files/charles13ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>26</td><td>17</td><td>9</td><td>1</td><td>16</td><td>11</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html" target="_blank">Automatic facial makeup detection with application in face recognition</a></td><td><span class="gray">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>71%</td><td>49</td><td>35</td><td>14</td><td>1</td><td>19</td><td>29</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html" target="_blank">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>company</td><td>Facebook</td><td>United States</td><td>37.39367170</td><td>-122.08072620</td><td>91%</td><td>54</td><td>49</td><td>4</td><td>1</td><td>41</td><td>12</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html" target="_blank">Bosphorus Database for 3D Face Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>352</td><td>198</td><td>154</td><td>17</td><td>162</td><td>188</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United 
States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html" target="_blank">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>West Virginia University</td><td>United States</td><td>39.65404635</td><td>-79.96475355</td><td>62%</td><td>53</td><td>33</td><td>20</td><td>0</td><td>19</td><td>31</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html" target="_blank">ChaLearn looking at people: A review of events and resources</a></td><td><span class="gray">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>13</td><td>9</td><td>4</td><td>1</td><td>8</td><td>4</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html" target="_blank">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>60</td><td>42</td><td>18</td><td>0</td><td>34</td><td>28</td></tr><tr><td>22ad2c8c0f4d6aa4328b38d894b814ec22579761</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/22ad2c8c0f4d6aa4328b38d894b814ec22579761.html" target="_blank">Clothing cosegmentation for recognizing people</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>65%</td><td>178</td><td>116</td><td>62</td><td>7</td><td>100</td><td>86</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>285</td><td>188</td><td>97</td><td>11</td><td>197</td><td>93</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html" target="_blank">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://www.bmva.org/bmvc/2010/conference/paper12/abstract12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>285</td><td>188</td><td>97</td><td>11</td><td>197</td><td>93</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html" target="_blank">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://physics.lbl.gov/patrecog/images/Facerecog_gabor.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>899</td><td>508</td><td>391</td><td>51</td><td>431</td><td>451</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html" target="_blank">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><span class="gray">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>Australia</td><td>-35.27769990</td><td>149.11852700</td><td>64%</td><td>181</td><td>115</td><td>66</td><td>8</td><td>87</td><td>97</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html" target="_blank">Competitive affective gaming: winning with a smile</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Universidade NOVA de Lisboa, Caparica, Portugal</td><td>Portugal</td><td>38.66096400</td><td>-9.20581300</td><td>78%</td><td>9</td><td>7</td><td>2</td><td>0</td><td>5</td><td>4</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html" target="_blank">Component-Based Face Recognition with 3D Morphable Models</a></td><td><span class="gray">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html" target="_blank">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="https://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>999</td><td>553</td><td>446</td><td>69</td><td>540</td><td>439</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html" target="_blank">Consistent Re-identification in a Camera Network</a></td><td><a href="https://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>49</td><td>35</td><td>14</td><td>3</td><td>34</td><td>13</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>33</td><td>20</td><td>13</td><td>1</td><td>23</td><td>11</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html" target="_blank">Context-Aware CNNs for Person Head Detection</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>33</td><td>20</td><td>13</td><td>1</td><td>23</td><td>11</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html" target="_blank">Crowdsourcing facial expressions for affective-interaction</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB-Wiki</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html" target="_blank">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>79%</td><td>122</td><td>96</td><td>26</td><td>4</td><td>75</td><td>48</td></tr><tr><td>5a5f0287484f0d480fed1ce585dbf729586f0edc</td><td>disfa</td><td>DISFA</td><td><a href="papers/5a5f0287484f0d480fed1ce585dbf729586f0edc.html" target="_blank">DISFA: A Spontaneous Facial Action Intensity Database</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Affective Computing</td><td>edu</td><td>University of Denver</td><td>United States</td><td>39.67665410</td><td>-104.96220300</td><td>54%</td><td>184</td><td>100</td><td>84</td><td>17</td><td>96</td><td>89</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB-Wiki</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html" target="_blank">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="http://www.vision.ee.ethz.ch/en/publications/papers/articles/eth_biwi_01299.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>72%</td><td>145</td><td>105</td><td>40</td><td>9</td><td>93</td><td>51</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html" target="_blank">Deep Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>999</td><td>646</td><td>353</td><td>48</td><td>558</td><td>429</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html" target="_blank">Deep Learning Face Attributes in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>57%</td><td>919</td><td>526</td><td>392</td><td>61</td><td>694</td><td>201</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html" target="_blank">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern 
Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>176</td><td>112</td><td>64</td><td>2</td><td>113</td><td>62</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html" target="_blank">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>568</td><td>412</td><td>156</td><td>19</td><td>320</td><td>235</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html" target="_blank">Depth and Appearance for Mobile Scene Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td>edu</td><td>ETH Zurich</td><td>Switzerland</td><td>47.37631300</td><td>8.54766990</td><td>63%</td><td>324</td><td>204</td><td>120</td><td>26</td><td>193</td><td>127</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html" target="_blank">Describing Common Human Visual Actions in Images</a></td><td><a href="https://arxiv.org/pdf/1506.02203.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>25</td><td>17</td><td>8</td><td>0</td><td>23</td><td>2</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>bpad</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html" target="_blank">Describing people: A poselet-based approach to attribute classification</a></td><td><span class="gray">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>230</td><td>140</td><td>90</td><td>14</td><td>163</td><td>66</td></tr><tr><td>d3200d49a19a4a4e4e9745ee39649b65d80c834b</td><td>scut_head</td><td>SCUT HEAD</td><td><a href="papers/d3200d49a19a4a4e4e9745ee39649b65d80c834b.html" target="_blank">Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture</a></td><td><a href="https://arxiv.org/pdf/1803.09256.pdf" target="_blank">[pdf]</a></td><td>2018 24th International Conference on Pattern Recognition (ICPR)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>9cc8cf0c7d7fa7607659921b6ff657e17e135ecc</td><td>mafa</td><td>MAsked FAces</td><td><a href="papers/9cc8cf0c7d7fa7607659921b6ff657e17e135ecc.html" target="_blank">Detecting Masked Faces in the Wild with LLE-CNNs</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>1</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html" target="_blank">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://authors.library.caltech.edu/49084/13/FaceDistanceEstimation_RONCHI.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>6</td><td>3</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html" target="_blank">Ear Recognition: More Than a Survey</a></td><td><a href="https://arxiv.org/pdf/1611.06203.pdf" target="_blank">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td></td><td>77%</td><td>26</td><td>20</td><td>6</td><td>0</td><td>10</td><td>16</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw</td><td>LFW</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html" target="_blank">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>183</td><td>109</td><td>74</td><td>13</td><td>103</td><td>77</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html" target="_blank">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>86</td><td>45</td><td>41</td><td>7</td><td>54</td><td>29</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html" target="_blank">End-to-End Deep Learning for Person Search</a></td><td><a href="https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>46</td><td>32</td><td>14</td><td>0</td><td>27</td><td>16</td></tr><tr><td>1bd1645a629f1b612960ab9bba276afd4cf7c666</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/1bd1645a629f1b612960ab9bba276afd4cf7c666.html" target="_blank">End-to-End People Detection in Crowded Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Stanford University</td><td>United States</td><td>37.43131385</td><td>-122.16936535</td><td>66%</td><td>67</td><td>44</td><td>22</td><td>2</td><td>42</td><td>23</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html" target="_blank">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="https://pdfs.semanticscholar.org/7847/b1fbccadb780b655e72c66d3f9e93ddb880c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of California, Santa Cruz</td><td>United States</td><td>36.99158470</td><td>-122.05827710</td><td>66%</td><td>624</td><td>414</td><td>210</td><td>33</td><td>342</td><td>276</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html" target="_blank">Evaluating Multiple Object Tracking Performance: The CLEAR MOT 
Metrics</a></td><td><a href="https://cvhci.anthropomatik.kit.edu/images/stories/msmmi/papers/eurasip2008.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>632</td><td>366</td><td>264</td><td>44</td><td>358</td><td>264</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html" target="_blank">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>14</td><td>5</td><td>9</td><td>0</td><td>12</td><td>1</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html" target="_blank">Exploring Models and Data for Image Question Answering</a></td><td><a href="https://arxiv.org/pdf/1505.02074.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>206</td><td>126</td><td>80</td><td>11</td><td>162</td><td>39</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>2cd7821fcf5fae53a185624f7eeda007434ae037</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/2cd7821fcf5fae53a185624f7eeda007434ae037.html" target="_blank">Exploring the geo-dependence of human face appearance</a></td><td><a href="http://cs.uky.edu/~jacobs/papers/islam2014faces.pdf" target="_blank">[pdf]</a></td><td>IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>88%</td><td>8</td><td>7</td><td>1</td><td>0</td><td>5</td><td>3</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html" target="_blank">FDDB: A benchmark for face detection in unconstrained settings</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>380</td><td>247</td><td>133</td><td>16</td><td>202</td><td>164</td></tr><tr><td>31de9b3dd6106ce6eec9a35991b2b9083395fd0b</td><td>feret</td><td>FERET</td><td><a href="papers/31de9b3dd6106ce6eec9a35991b2b9083395fd0b.html" target="_blank">FERET ( Face Recognition Technology ) Recognition Algorithm Development and Test Results</a></td><td><a href="https://pdfs.semanticscholar.org/31de/9b3dd6106ce6eec9a35991b2b9083395fd0b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>75</td><td>39</td><td>36</td><td>5</td><td>54</td><td>20</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html" target="_blank">Face detection, pose estimation, and landmark localization in the wild</a></td><td><span 
class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>999</td><td>709</td><td>290</td><td>35</td><td>576</td><td>422</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html" target="_blank">Face recognition in unconstrained videos with matched background similarity</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Tel Aviv University</td><td>Israel</td><td>32.11198890</td><td>34.80459702</td><td>66%</td><td>509</td><td>338</td><td>170</td><td>23</td><td>294</td><td>216</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html" target="_blank">Face swapping: automatically replacing faces in photographs</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6204776d31359d129a582057c2d788a14f8aadeb</td><td>youtube_celebrities</td><td>YouTube Celebrities</td><td><a href="papers/6204776d31359d129a582057c2d788a14f8aadeb.html" target="_blank">Face tracking and recognition with visual constraints in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Rutgers University</td><td>United States</td><td>40.47913175</td><td>-74.43168868</td><td>57%</td><td>267</td><td>151</td><td>115</td><td>11</td><td>125</td><td>121</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html" target="_blank">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="https://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>225</td><td>144</td><td>81</td><td>17</td><td>146</td><td>77</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html" target="_blank">Faces in Places: compound query retrieval</a></td><td><a href="https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>3</td><td>2</td></tr><tr><td>0ab7cff2ccda7269b73ff6efd9d37e1318f7db25</td><td>ibm_dif</td><td>IBM Diversity in Faces</td><td><a href="papers/0ab7cff2ccda7269b73ff6efd9d37e1318f7db25.html" target="_blank">Facial Coding Scheme Reference 1 Craniofacial Distances</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>407</td><td>283</td><td>124</td><td>16</td><td>252</td><td>153</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html" target="_blank">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="https://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>407</td><td>283</td><td>124</td><td>16</td><td>252</td><td>153</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html" target="_blank">Fashion Landmark Detection in the Wild</a></td><td><a href="https://arxiv.org/pdf/1608.03049.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>26</td><td>19</td><td>7</td><td>1</td><td>16</td><td>10</td></tr><tr><td>060820f110a72cbf02c14a6d1085bd6e1d994f6a</td><td>caltech_crp</td><td>Caltech CRP</td><td><a href="papers/060820f110a72cbf02c14a6d1085bd6e1d994f6a.html" target="_blank">Fine-grained classification of pedestrians in video: Benchmark and state of the art</a></td><td><a href="https://arxiv.org/pdf/1605.06177.pdf" target="_blank">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>17</td><td>8</td><td>9</td><td>0</td><td>9</td><td>8</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html" target="_blank">Fine-grained evaluation on face detection in the wild</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>12</td><td>5</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html" target="_blank">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>148</td><td>99</td><td>49</td><td>7</td><td>105</td><td>43</td></tr><tr><td>7b92d1e53cc87f7a4256695de590098a2f30261e</td><td>appa_real</td><td>APPA-REAL</td><td><a href="papers/7b92d1e53cc87f7a4256695de590098a2f30261e.html" target="_blank">From Apparent to Real Age: Gender, Age, Ethnic, Makeup, and Expression Bias Analysis in Real Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html" target="_blank">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/utribes_bmvc13_final.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>18</td><td>12</td><td>6</td><td>1</td><td>12</td><td>6</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html" target="_blank">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="https://arxiv.org/pdf/1609.06426.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>11</td><td>6</td><td>5</td><td>0</td><td>5</td><td>4</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html" target="_blank">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.147.1487&rep=rep1&type=pdf" target="_blank">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>999</td><td>561</td><td>438</td><td>66</td><td>498</td><td>462</td></tr><tr><td>06f02199690961ba52997cde1527e714d2b3bf8f</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/06f02199690961ba52997cde1527e714d2b3bf8f.html" target="_blank">Gaze locking: passive eye contact detection for human-object interaction</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>76%</td><td>79</td><td>60</td><td>19</td><td>0</td><td>49</td><td>34</td></tr><tr><td>18858cc936947fc96b5c06bbe3c6c2faa5614540</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/18858cc936947fc96b5c06bbe3c6c2faa5614540.html" target="_blank">Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification</a></td><td><a href="http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a-supp.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>53%</td><td>59</td><td>31</td><td>28</td><td>0</td><td>47</td><td>10</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html" target="_blank">Genealogical face recognition based on UB KinFace database</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>United States</td><td>42.93362780</td><td>-78.88394479</td><td>55%</td><td>31</td><td>17</td><td>14</td><td>0</td><td>11</td><td>21</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html" target="_blank">Generic object recognition with boosting</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>TU Graz</td><td>Austria</td><td>47.07071400</td><td>15.43950400</td><td>53%</td><td>293</td><td>155</td><td>138</td><td>16</td><td>195</td><td>97</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html" target="_blank">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html" target="_blank">HDA dataset-DRAFT 1 A Multi-camera video data set for research on High-Definition surveillance</a></td><td><a href="https://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>a8d0b149c2eadaa02204d3e4356fbc8eccf3b315</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/a8d0b149c2eadaa02204d3e4356fbc8eccf3b315.html" target="_blank">Hi4D-ADSIP 3-D dynamic facial articulation database</a></td><td><span class="gray">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>1</td><td>4</td><td>11</td></tr><tr><td>2d45cfd838016a6e39f6b766ffe85acd649440c7</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/2d45cfd838016a6e39f6b766ffe85acd649440c7.html" target="_blank">Hierarchical temporal graphical model for head pose estimation and subsequent attribute classification in real-world videos</a></td><td><span class="gray">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>8</td><td>6</td><td>2</td><td>0</td><td>5</td><td>3</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html" target="_blank">High Five: Recognising human interactions in TV shows</a></td><td><a href="https://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>98</td><td>56</td><td>42</td><td>10</td><td>66</td><td>28</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html" target="_blank">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="https://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>95</td><td>61</td><td>34</td><td>4</td><td>59</td><td>35</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html" target="_blank">Histograms of oriented gradients for human detection</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>INRIA Rhone-Alps, Montbonnot, France</td><td>France</td><td>45.21788600</td><td>5.80736900</td><td>57%</td><td>999</td><td>573</td><td>426</td><td>41</td><td>419</td><td>509</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html" target="_blank">How to Take a Good Selfie?</a></td><td><span 
class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>73%</td><td>11</td><td>8</td><td>3</td><td>0</td><td>7</td><td>5</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html" target="_blank">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>72%</td><td>18</td><td>13</td><td>5</td><td>0</td><td>14</td><td>4</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html" target="_blank">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://www.ee.cuhk.edu.hk/~xgwang/papers/liZWaccv12.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>69%</td><td>280</td><td>194</td><td>86</td><td>9</td><td>139</td><td>137</td></tr><tr><td>f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44</td><td>pa_100k</td><td>PA-100K</td><td><a href="papers/f41c7bb02fc97d5fb9cadd7a49c3e558a1c58a44.html" target="_blank">HydraPlus-Net: Attentive Deep Features for Pedestrian Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>55</td><td>41</td><td>14</td><td>0</td><td>36</td><td>17</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html" target="_blank">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><span class="gray">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td></td><td>79%</td><td>14</td><td>11</td><td>3</td><td>0</td><td>12</td><td>1</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html" target="_blank">IARPA Janus Benchmark-B Face Dataset</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td>edu</td><td>Michigan State University</td><td>United States</td><td>42.71856800</td><td>-84.47791571</td><td>63%</td><td>35</td><td>22</td><td>13</td><td>3</td><td>25</td><td>8</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td>i-LIDS Multiple-Camera</td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html" target="_blank">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>35</td><td>20</td><td>15</td><td>2</td><td>21</td><td>14</td></tr><tr><td>7f23a4bb0c777dd72cca7665a5f370ac7980217e</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/7f23a4bb0c777dd72cca7665a5f370ac7980217e.html" target="_blank">Improving Person Re-identification by Attribute and Identity Learning</a></td><td><a href="https://arxiv.org/pdf/1703.07220.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>84%</td><td>87</td><td>73</td><td>14</td><td>0</td><td>43</td><td>42</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html" target="_blank">Indian Face Age Database: A Database for Face Recognition with Age Variation</a></td><td><a href="https://pdfs.semanticscholar.org/025e/4cf3fd3fdeced91e9373b56ee14af7ca432c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html" target="_blank">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td>edu</td><td>BVBCET, Hubli, India</td><td>India</td><td>15.36883320</td><td>75.12137960</td><td>65%</td><td>17</td><td>11</td><td>6</td><td>0</td><td>11</td><td>5</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html" target="_blank">Interactive Facial Feature Localization</a></td><td><a href="https://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Adobe</td><td>United States</td><td>37.33077030</td><td>-121.89409510</td><td>85%</td><td>352</td><td>298</td><td>54</td><td>8</td><td>212</td><td>146</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html" target="_blank">Investigating Open-World Person Re-identification Using a Drone</a></td><td><a href="https://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>5</td><td>2</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html" target="_blank">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>United States</td><td>34.22498270</td><td>-77.86907744</td><td>77%</td><td>13</td><td>10</td><td>3</td><td>0</td><td>6</td><td>8</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html" target="_blank">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="https://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html" target="_blank">Iranian 
Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</a></td><td>2007 International Conference on Machine Vision</td><td>edu</td><td>Islamic Azad University</td><td>Iran</td><td>34.84529990</td><td>48.55962120</td><td>48%</td><td>23</td><td>11</td><td>12</td><td>2</td><td>14</td><td>9</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html" target="_blank">Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>1</td><td>3</td><td>5</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html" target="_blank">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td>edu</td><td>University of North Carolina at Chapel Hill</td><td>United States</td><td>35.91139710</td><td>-79.05045290</td><td>61%</td><td>82</td><td>50</td><td>32</td><td>6</td><td>28</td><td>52</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html" target="_blank">Kinship Verification through Transfer Learning</a></td><td><a href="https://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>71</td><td>41</td><td>30</td><td>2</td><td>29</td><td>42</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html" target="_blank">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="https://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Massachusetts</td><td>United States</td><td>42.38897850</td><td>-72.52869870</td><td>69%</td><td>123</td><td>85</td><td>38</td><td>3</td><td>71</td><td>51</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html" target="_blank">Labeled Faces in the Wild: A Database forStudying Face Recognition in Unconstrained Environments</a></td><td><a href="https://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>999</td><td>632</td><td>367</td><td>59</td><td>598</td><td>382</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html" target="_blank">Labeled Faces in the Wild: A Survey</a></td><td><a href="https://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Stevens Institute of Technology</td><td>United 
States</td><td>40.74225200</td><td>-74.02709490</td><td>64%</td><td>109</td><td>70</td><td>39</td><td>7</td><td>66</td><td>43</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html" target="_blank">Large age-gap face verification by feature injection in deep networks</a></td><td><a href="https://arxiv.org/pdf/1602.06149.pdf" target="_blank">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>7</td><td>5</td><td>2</td><td>0</td><td>3</td><td>4</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html" target="_blank">Large scale unconstrained open set face database</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>company</td><td>Securics Inc., Colorado Springs, CO</td><td>United States</td><td>38.83388160</td><td>-104.82136340</td><td>83%</td><td>6</td><td>5</td><td>1</td><td>0</td><td>4</td><td>2</td></tr><tr><td>4af89578ac237278be310f7660a408b03f12d603</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/4af89578ac237278be310f7660a408b03f12d603.html" target="_blank">Large-scale geo-facial image analysis</a></td><td><a href="https://pdfs.semanticscholar.org/3ede/3ed28329bf48fbd06438a69c4f855bef003f.pdf" target="_blank">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>6</td><td>6</td><td>0</td><td>0</td><td>4</td><td>2</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>108</td><td>77</td><td>31</td><td>7</td><td>66</td><td>44</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html" target="_blank">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>108</td><td>77</td><td>31</td><td>7</td><td>66</td><td>44</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html" target="_blank">Learning Face Representation from Scratch</a></td><td><a href="https://arxiv.org/pdf/1411.7923.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese Academy of Sciences</td><td>China</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>476</td><td>339</td><td>137</td><td>19</td><td>290</td><td>182</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html" target="_blank">Learning Social Relation Traits from Face Images</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>23</td><td>14</td><td>9</td><td>4</td><td>16</td><td>7</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html" target="_blank">Learning effective human pose estimation from inaccurate annotation</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Leeds</td><td>United Kingdom</td><td>53.80387185</td><td>-1.55245712</td><td>71%</td><td>169</td><td>120</td><td>49</td><td>7</td><td>108</td><td>65</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>erce</td><td>ERCe</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>287ddcb3db5562235d83aee318f318b8d5e43fb1</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/287ddcb3db5562235d83aee318f318b8d5e43fb1.html" target="_blank">Learning from Multiple Sources for Video Summarisation</a></td><td><a href="https://arxiv.org/pdf/1501.03069.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>7</td><td>4</td><td>3</td><td>0</td><td>4</td><td>3</td></tr><tr><td>5981e6479c3fd4e31644db35d236bfb84ae46514</td><td>mot</td><td>MOT</td><td><a href="papers/5981e6479c3fd4e31644db35d236bfb84ae46514.html" target="_blank">Learning to associate: HybridBoosted multi-target tracker for crowded scene</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of Southern California</td><td>United States</td><td>34.02241490</td><td>-118.28634407</td><td>61%</td><td>326</td><td>200</td><td>125</td><td>22</td><td>190</td><td>137</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>369</td><td>227</td><td>141</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>369</td><td>227</td><td>141</td><td>32</td><td>237</td><td>131</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html" target="_blank">Learning to parse images of articulated bodies</a></td><td><a href="http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_899.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>369</td><td>227</td><td>141</td><td>32</td><td>237</td><td>131</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html" target="_blank">Level Playing Field for Million Scale Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>United States</td><td>47.65432380</td><td>-122.30800894</td><td>72%</td><td>39</td><td>28</td><td>11</td><td>2</td><td>29</td><td>9</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html" target="_blank">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/a105/f1ef67b4b02da38eadce8ffb4e13aa301a93.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>197</td><td>132</td><td>65</td><td>15</td><td>108</td><td>88</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfpw</td><td>LFPW</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html" target="_blank">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk_campus_03</td><td>CUHK03 Campus</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html" target="_blank">Locally Aligned Feature Transforms across Views</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>258</td><td>164</td><td>94</td><td>15</td><td>136</td><td>117</td></tr><tr><td>8990cdce3f917dad622e43e033db686b354d057c</td><td>tiny_faces</td><td>TinyFace</td><td><a href="papers/8990cdce3f917dad622e43e033db686b354d057c.html" target="_blank">Low-Resolution Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1811.08965.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html" target="_blank">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>168</td><td>115</td><td>53</td><td>4</td><td>97</td><td>69</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>North Carolina University</td><td>United 
States</td><td>34.22398690</td><td>-77.87013250</td><td>59%</td><td>437</td><td>258</td><td>178</td><td>22</td><td>228</td><td>203</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html" target="_blank">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><span class="gray">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>North Carolina University</td><td>United States</td><td>34.22398690</td><td>-77.87013250</td><td>59%</td><td>437</td><td>258</td><td>178</td><td>22</td><td>228</td><td>203</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html" target="_blank">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="https://arxiv.org/pdf/1607.08221.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>78%</td><td>180</td><td>141</td><td>39</td><td>8</td><td>120</td><td>59</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html" target="_blank">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://www.researchgate.net/profile/Monson_Hayes/publication/221124512_Maximum_Likelihood_Training_of_the_Embedded_HMM_for_Face_Detection_and_Recognition/links/0deec53509be9d6f55000000.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>67</td><td>36</td><td>31</td><td>4</td><td>29</td><td>28</td></tr><tr><td>e58dd160a76349d46f881bd6ddbc2921f08d1050</td><td>gfw</td><td>Grouping Face in the Wild</td><td><a href="papers/e58dd160a76349d46f881bd6ddbc2921f08d1050.html" target="_blank">Merge or Not? 
Learning to Group Faces via Imitation Learning</a></td><td><a href="https://arxiv.org/pdf/1707.03986.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>2</td><td>0</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>fpoq</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html" target="_blank">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://authors.library.caltech.edu/41565/1/tracking_bmvc.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>81%</td><td>16</td><td>13</td><td>3</td><td>0</td><td>13</td><td>4</td></tr><tr><td>5e0f8c355a37a5a89351c02f174e7a5ddcb98683</td><td>coco</td><td>COCO</td><td><a href="papers/5e0f8c355a37a5a89351c02f174e7a5ddcb98683.html" target="_blank">Microsoft COCO: Common Objects in Context</a></td><td><a href="https://arxiv.org/pdf/1405.0312.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>999</td><td>608</td><td>391</td><td>25</td><td>722</td><td>259</td></tr><tr><td>41976ebc8ab76d9a6861487c97cc7fcbe3b6015f</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/41976ebc8ab76d9a6861487c97cc7fcbe3b6015f.html" target="_blank">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="https://arxiv.org/pdf/1801.03150.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Columbia University</td><td>United States</td><td>40.84198360</td><td>-73.94368971</td><td>76%</td><td>29</td><td>22</td><td>7</td><td>2</td><td>27</td><td>2</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>60%</td><td>311</td><td>186</td><td>125</td><td>33</td><td>208</td><td>105</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html" target="_blank">Monocular 3D pose estimation and tracking by detection</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>TU Darmstadt</td><td>Germany</td><td>49.87482770</td><td>8.65632810</td><td>60%</td><td>311</td><td>186</td><td>125</td><td>33</td><td>208</td><td>105</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html" target="_blank">Multi-camera activity correlation analysis</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>69%</td><td>142</td><td>98</td><td>44</td><td>7</td><td>77</td><td>64</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard 
pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html" target="_blank">Multi-cue onboard pedestrian detection</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html" target="_blank">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>148</td><td>113</td><td>35</td><td>3</td><td>80</td><td>65</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html" target="_blank">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>26</td><td>16</td><td>10</td><td>2</td><td>21</td><td>6</td></tr><tr><td>2b926b3586399d028b46315d7d9fb9d879e4f79c</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2b926b3586399d028b46315d7d9fb9d879e4f79c.html" target="_blank">Multimodal 2D, 2.5D & 3D Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2006 International Conference on Image Processing</td><td>edu</td><td>Universidad Rey Juan Carlos, Spain</td><td>Spain</td><td>40.33586610</td><td>-3.87694320</td><td>57%</td><td>14</td><td>8</td><td>6</td><td>0</td><td>2</td><td>12</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html" target="_blank">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>42</td><td>23</td><td>19</td><td>0</td><td>17</td><td>26</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html" target="_blank">Names and faces in the news</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html" target="_blank">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="https://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>22</td><td>11</td><td>11</td><td>3</td><td>11</td><td>10</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html" target="_blank">Object Detection Combining Recognition and Segmentation</a></td><td><a href="https://pdfs.semanticscholar.org/3394/168ff0719b03ff65bcea35336a76b21fe5e4.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>105</td><td>64</td><td>41</td><td>9</td><td>58</td><td>43</td></tr><tr><td>12ad3b5bbbf407f8e54ea692c07633d1a867c566</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/12ad3b5bbbf407f8e54ea692c07633d1a867c566.html" target="_blank">Object recognition using segmentation for feature detection</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings of the 17th International Conference on Pattern Recognition, 2004. ICPR 2004.</td><td>edu</td><td>Inst. of Comput. Sci., Univ. of Leoben, Austria</td><td>Austria</td><td>47.38473720</td><td>15.09302010</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html" target="_blank">On a Large Sequence-Based Human Gait Database</a></td><td><a href="https://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>150</td><td>95</td><td>55</td><td>17</td><td>103</td><td>51</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html" target="_blank">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>78</td><td>43</td><td>35</td><td>8</td><td>44</td><td>31</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html" target="_blank">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="https://arxiv.org/pdf/1605.02464.pdf" 
target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>4</td><td>4</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html" target="_blank">Overview of the face recognition grand challenge</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td>edu</td><td>NIST</td><td>United States</td><td>39.14004000</td><td>-77.21850600</td><td>57%</td><td>999</td><td>568</td><td>430</td><td>85</td><td>549</td><td>442</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html" target="_blank">PETS 2017: Dataset and Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>1</td><td>8</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html" target="_blank">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><span class="gray">[pdf]</a></td><td>Face and Gesture 2011</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>54%</td><td>189</td><td>103</td><td>86</td><td>21</td><td>108</td><td>78</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html" target="_blank">Parameterisation of a stochastic model for human face identification</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>999</td><td>501</td><td>498</td><td>94</td><td>543</td><td>427</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html" target="_blank">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>138</td><td>83</td><td>55</td><td>6</td><td>76</td><td>63</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html" target="_blank">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision 
Workshops</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>28</td><td>20</td><td>8</td><td>0</td><td>13</td><td>15</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html" target="_blank">Pedestrian Attribute Recognition At Far Distance</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>88</td><td>66</td><td>22</td><td>1</td><td>50</td><td>36</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html" target="_blank">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>60%</td><td>999</td><td>600</td><td>399</td><td>69</td><td>527</td><td>466</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html" target="_blank">Pedestrian detection: A benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html" target="_blank">People-tracking-by-detection and people-detection-by-tracking</a></td><td><span class="gray">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>545</td><td>324</td><td>220</td><td>37</td><td>330</td><td>218</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United 
States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html" target="_blank">Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking</a></td><td><a href="https://arxiv.org/pdf/1609.01775.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Duke University</td><td>United States</td><td>35.99905220</td><td>-78.92906290</td><td>85%</td><td>169</td><td>144</td><td>25</td><td>3</td><td>113</td><td>54</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html" target="_blank">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>386</td><td>263</td><td>123</td><td>23</td><td>204</td><td>180</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>ilids_mcts_vid</td><td>iLIDS-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html" target="_blank">Person Re-identification by Video Ranking</a></td><td><a href="https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>209</td><td>143</td><td>66</td><td>8</td><td>111</td><td>97</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html" target="_blank">Person Re-identification in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>68%</td><td>77</td><td>52</td><td>25</td><td>1</td><td>47</td><td>27</td></tr><tr><td>a0cc5f73a37723a6dd465924143f1cb4976d0169</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/a0cc5f73a37723a6dd465924143f1cb4976d0169.html" target="_blank">Person Transfer GAN to Bridge Domain Gap for Person Re-identification</a></td><td><span class="gray">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>92%</td><td>24</td><td>22</td><td>2</td><td>1</td><td>20</td><td>4</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html" target="_blank">Personalizing Human Video Pose Estimation</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Oxford University</td><td>United 
Kingdom</td><td>51.75208490</td><td>-1.25166460</td><td>64%</td><td>36</td><td>23</td><td>13</td><td>2</td><td>30</td><td>8</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html" target="_blank">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>716</td><td>415</td><td>301</td><td>59</td><td>492</td><td>222</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html" target="_blank">Presentation and validation of the Radboud Faces Database</a></td><td><a href="https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>487</td><td>234</td><td>253</td><td>39</td><td>342</td><td>144</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html" target="_blank">Pruning training sets for learning of object categories</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>63</td><td>44</td><td>19</td><td>4</td><td>42</td><td>20</td></tr><tr><td>3531332efe19be21e7401ba1f04570a142617236</td><td>ufdd</td><td>UFDD</td><td><a href="papers/3531332efe19be21e7401ba1f04570a142617236.html" target="_blank">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="https://arxiv.org/pdf/1804.10275.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>75%</td><td>4</td><td>3</td><td>1</td><td>0</td><td>4</td><td>0</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html" target="_blank">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>237</td><td>156</td><td>81</td><td>14</td><td>159</td><td>76</td></tr><tr><td>c72a2ea819df9b0e8cd267eebcc6528b8741e03d</td><td>megaage</td><td>MegaAge</td><td><a href="papers/c72a2ea819df9b0e8cd267eebcc6528b8741e03d.html" target="_blank">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="https://arxiv.org/pdf/1708.09687.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>4</td><td>4</td><td>0</td><td>0</td><td>4</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html" target="_blank">Re-identification of pedestrians with variable occlusion and scale</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>Kingston University</td><td>United 
Kingdom</td><td>51.42930860</td><td>-0.26840440</td><td>56%</td><td>9</td><td>5</td><td>4</td><td>1</td><td>5</td><td>4</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html" target="_blank">Re-identify people in wide area camera network</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Udine</td><td>Italy</td><td>46.08107230</td><td>13.21194740</td><td>60%</td><td>60</td><td>36</td><td>24</td><td>1</td><td>38</td><td>21</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html" target="_blank">ReSEED: social event dEtection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>1</td><td>5</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html" target="_blank">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html" target="_blank">Recognition using visual phrases</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>United States</td><td>40.11116745</td><td>-88.22587665</td><td>58%</td><td>246</td><td>143</td><td>103</td><td>17</td><td>170</td><td>68</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html" target="_blank">Recognize complex events from static images by fusing deep channels</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>70%</td><td>44</td><td>31</td><td>13</td><td>1</td><td>29</td><td>15</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html" target="_blank">Recognizing disguised faces</a></td><td><a href="https://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>18</td><td>10</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html" target="_blank">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="https://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>57%</td><td>511</td><td>289</td><td>222</td><td>49</td><td>329</td><td>182</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html" target="_blank">Robust Face Landmark Estimation under Occlusion</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td>edu</td><td>California Institute of Technology</td><td>United States</td><td>34.13710185</td><td>-118.12527487</td><td>72%</td><td>325</td><td>233</td><td>92</td><td>12</td><td>194</td><td>133</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html" target="_blank">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><span class="gray">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>Canada</td><td>45.50397610</td><td>-73.57496870</td><td>44%</td><td>18</td><td>8</td><td>10</td><td>0</td><td>13</td><td>7</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html" target="_blank">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>2</td><td>21</td><td>12</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html" target="_blank">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>19</td><td>9</td><td>10</td><td>2</td><td>6</td><td>13</td></tr><tr><td>29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d</td><td>scface</td><td>SCface</td><td><a href="papers/29a705a5fa76641e0d8963f1fdd67ee4c0d92d3d.html" target="_blank">SCface – surveillance cameras face database</a></td><td><a href="http://scface.org/SCface%20-%20Surveillance%20Cameras%20Face%20Database.pdf" target="_blank">[pdf]</a></td><td>Multimedia Tools and Applications</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>179</td><td>101</td><td>78</td><td>15</td><td>88</td><td>89</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html" target="_blank">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="https://arxiv.org/pdf/1804.04326.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html" target="_blank">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td>edu</td><td>Brown University</td><td>United States</td><td>41.82686820</td><td>-71.40123146</td><td>60%</td><td>264</td><td>159</td><td>105</td><td>27</td><td>206</td><td>56</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html" target="_blank">Scalable Person Re-identification: A Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>77%</td><td>460</td><td>355</td><td>105</td><td>9</td><td>263</td><td>185</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html" target="_blank">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>United States</td><td>42.36782045</td><td>-71.12666653</td><td>62%</td><td>52</td><td>32</td><td>20</td><td>3</td><td>38</td><td>13</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html" target="_blank">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>52</td><td>34</td><td>18</td><td>1</td><td>46</td><td>6</td></tr><tr><td>570f37ed63142312e6ccdf00ecc376341ec72b9f</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/570f37ed63142312e6ccdf00ecc376341ec72b9f.html" target="_blank">Social LSTM: Human Trajectory Prediction in Crowded Spaces</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>224</td><td>125</td><td>99</td><td>3</td><td>140</td><td>81</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html" target="_blank">Spoofing faces using makeup: An investigative study</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>France</td><td>43.61581310</td><td>7.06838000</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>0</td><td>1</td><td>5</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html" target="_blank">Sports Videos in the Wild (SVW): A video dataset for 
sports analysis</a></td><td><span class="gray">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td></td><td></td><td></td><td></td><td></td><td>86%</td><td>7</td><td>6</td><td>1</td><td>1</td><td>5</td><td>2</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>oxford_town_centre</td><td>TownCentre</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html" target="_blank">Stable multi-target tracking in real-time surveillance video</a></td><td><span class="gray">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>University of Oxford</td><td>United Kingdom</td><td>51.75345380</td><td>-1.25400997</td><td>68%</td><td>328</td><td>222</td><td>106</td><td>13</td><td>186</td><td>140</td></tr><tr><td>2306b2a8fba28539306052764a77a0d0f5d1236a</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/2306b2a8fba28539306052764a77a0d0f5d1236a.html" target="_blank">Surveillance Face Recognition Challenge</a></td><td><a href="https://arxiv.org/pdf/1804.09691.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html" target="_blank">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="https://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>4</td><td>2</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html" target="_blank">Texas 3D Face Recognition Database</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>66</td><td>40</td><td>26</td><td>3</td><td>40</td><td>27</td></tr><tr><td>6d96f946aaabc734af7fe3fc4454cf8547fcd5ed</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/6d96f946aaabc734af7fe3fc4454cf8547fcd5ed.html" target="_blank">The AR face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>58%</td><td>999</td><td>579</td><td>420</td><td>58</td><td>458</td><td>530</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html" target="_blank">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>429</td><td>254</td><td>175</td><td>38</td><td>198</td><td>234</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html" target="_blank">The CMU Face In Action (FIA) Database</a></td><td><a href="https://pdfs.semanticscholar.org/4766/2d1a368daf70ba70ef2d59eb6209f98b675d.pdf" 
target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>54</td><td>26</td><td>28</td><td>5</td><td>40</td><td>16</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>449</td><td>310</td><td>49</td><td>404</td><td>345</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html" target="_blank">The CMU Pose, Illumination, and Expression (PIE) Database</a></td><td><a href="http://www.comp.nus.edu.sg/~tsim/piedb.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>59%</td><td>760</td><td>449</td><td>310</td><td>49</td><td>404</td><td>345</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html" target="_blank">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="https://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>99</td><td>59</td><td>40</td><td>1</td><td>73</td><td>21</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>#N/A</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html" target="_blank">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="https://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>37</td><td>20</td><td>17</td><td>3</td><td>30</td><td>7</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html" target="_blank">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="https://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>2</td><td>18</td><td>3</td></tr><tr><td>9e31e77f9543ab42474ba4e9330676e18c242e72</td><td>imdb_face</td><td>IMDb Face</td><td><a href="papers/9e31e77f9543ab42474ba4e9330676e18c242e72.html" target="_blank">The Devil of Face Recognition is in the Noise</a></td><td><a href="https://arxiv.org/pdf/1807.11649.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Nanyang Technological University</td><td>Singapore</td><td>1.34841040</td><td>103.68297965</td><td>50%</td><td>6</td><td>3</td><td>3</td><td>0</td><td>4</td><td>1</td></tr><tr><td>71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6</td><td>umd_faces</td><td>UMD</td><td><a href="papers/71b7fc715e2f1bb24c0030af8d7e7b6e7cd128a6.html" target="_blank">The Do’s and Don’ts for CNN-Based Face Verification</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision 
Workshops (ICCVW)</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>26</td><td>16</td><td>10</td><td>2</td><td>16</td><td>8</td></tr><tr><td>72a155c987816ae81c858fddbd6beab656d86220</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/72a155c987816ae81c858fddbd6beab656d86220.html" target="_blank">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="https://arxiv.org/pdf/1805.07193.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html" target="_blank">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><span class="gray">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td>edu</td><td>University of Pittsburgh</td><td>United States</td><td>40.44415295</td><td>-79.96243993</td><td>61%</td><td>999</td><td>607</td><td>392</td><td>57</td><td>470</td><td>518</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html" target="_blank">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/5099/7a5605c1f61e09e9a96789ed7495be6625aa.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>3</td><td>18</td><td>9</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html" target="_blank">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="https://pdfs.semanticscholar.org/8d2a/1c768fce6f71584dd993fb97e7b6419aaf60.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td>edu</td><td>City University of New York</td><td>United States</td><td>40.87228250</td><td>-73.89489171</td><td>51%</td><td>115</td><td>59</td><td>56</td><td>8</td><td>75</td><td>37</td></tr><tr><td>dc8b25e35a3acb812beb499844734081722319b4</td><td>feret</td><td>FERET</td><td><a href="papers/dc8b25e35a3acb812beb499844734081722319b4.html" target="_blank">The FERET database and evaluation procedure for face-recognition algorithms</a></td><td><a href="http://biometrics.nist.gov/cs_links/face/frvt/feret/FERET_Database_evaluation_procedure.pdf" target="_blank">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>999</td><td>521</td><td>478</td><td>103</td><td>591</td><td>421</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html" target="_blank">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="https://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>16</td><td>8</td><td>8</td><td>1</td><td>10</td><td>6</td></tr><tr><td>8be57cdad86fdf8c8290df4ca3149592f3c46dd3</td><td>m2vts</td><td>m2vts</td><td><a href="papers/8be57cdad86fdf8c8290df4ca3149592f3c46dd3.html" target="_blank">The 
M2VTS Multimodal Face Database (Release 1.00)</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>73</td><td>33</td><td>40</td><td>2</td><td>39</td><td>33</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html" target="_blank">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="https://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf" target="_blank">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>33</td><td>17</td><td>16</td><td>4</td><td>29</td><td>4</td></tr><tr><td>578d4ad74818086bb64f182f72e2c8bd31e3d426</td><td>mr2</td><td>MR2</td><td><a href="papers/578d4ad74818086bb64f182f72e2c8bd31e3d426.html" target="_blank">The MR2: A multi-racial, mega-resolution database of facial stimuli.</a></td><td><a href="http://www.mpmlab.org/The%20MR2%20face%20database.pdf" target="_blank">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>7</td><td>0</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html" target="_blank">The MUG facial expression database</a></td><td><span class="gray">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>Greece</td><td>40.62984145</td><td>22.95889350</td><td>55%</td><td>82</td><td>45</td><td>37</td><td>4</td><td>34</td><td>47</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html" target="_blank">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>61%</td><td>61</td><td>37</td><td>24</td><td>0</td><td>43</td><td>16</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html" target="_blank">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td></td><td>76%</td><td>139</td><td>106</td><td>33</td><td>5</td><td>100</td><td>37</td></tr><tr><td>0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a</td><td>voc</td><td>VOC</td><td><a href="papers/0ee1916a0cb2dc7d3add086b5f1092c3d4beb38a.html" target="_blank">The Pascal Visual 
Object Classes (VOC) Challenge</a></td><td><a href="http://eprints.pascal-network.org/archive/00006187/01/PascalVOC_IJCV2009.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>company</td><td>Microsoft</td><td>United States</td><td>47.64233180</td><td>-122.13693020</td><td>61%</td><td>999</td><td>607</td><td>391</td><td>28</td><td>557</td><td>422</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html" target="_blank">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>116</td><td>70</td><td>46</td><td>14</td><td>84</td><td>31</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html" target="_blank">The intrinsic memorability of face photographs.</a></td><td><a href="https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf" target="_blank">[pdf]</a></td><td>Journal of experimental psychology. General</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>52</td><td>29</td><td>23</td><td>2</td><td>36</td><td>14</td></tr><tr><td>d178cde92ab3dc0dd2ebee5a76a33d556c39448b</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/d178cde92ab3dc0dd2ebee5a76a33d556c39448b.html" target="_blank">The jiku mobile video dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>National University of Singapore</td><td>Singapore</td><td>1.29620180</td><td>103.77689944</td><td>71%</td><td>24</td><td>17</td><td>7</td><td>0</td><td>6</td><td>19</td></tr><tr><td>ae0aee03d946efffdc7af2362a42d3750e7dd48a</td><td>put_face</td><td>Put Face</td><td><a href="papers/ae0aee03d946efffdc7af2362a42d3750e7dd48a.html" target="_blank">The put face database</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>55%</td><td>99</td><td>54</td><td>45</td><td>5</td><td>55</td><td>48</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html" target="_blank">Three-dimensional face recognition: an eigensurface approach</a></td><td><span class="gray">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. 
ICIP '04.</td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>38</td><td>16</td><td>22</td><td>4</td><td>24</td><td>13</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html" target="_blank">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="http://www.eecs.qmul.ac.uk/~ccloy/files/ijcv_2010.pdf" target="_blank">[pdf]</a></td><td>International Journal of Computer Vision</td><td>edu</td><td>Queen Mary University of London</td><td>United Kingdom</td><td>51.52472720</td><td>-0.03931035</td><td>63%</td><td>84</td><td>53</td><td>31</td><td>4</td><td>51</td><td>33</td></tr><tr><td>64e0690dd176a93de9d4328f6e31fc4afe1e7536</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/64e0690dd176a93de9d4328f6e31fc4afe1e7536.html" target="_blank">Tracking Multiple People Online and in Real Time</a></td><td><a href="https://pdfs.semanticscholar.org/64e0/690dd176a93de9d4328f6e31fc4afe1e7536.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>78%</td><td>23</td><td>18</td><td>5</td><td>1</td><td>12</td><td>10</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html" target="_blank">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>74%</td><td>34</td><td>25</td><td>9</td><td>0</td><td>18</td><td>16</td></tr><tr><td>4eab317b5ac436a949849ed286baa3de2a541eef</td><td>laofiw</td><td>LAOFIW</td><td><a href="papers/4eab317b5ac436a949849ed286baa3de2a541eef.html" target="_blank">Turning a Blind Eye: Explicit Removal of Biases and Variation from Deep Neural Network Embeddings</a></td><td><a href="https://arxiv.org/pdf/1809.02169.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>2</td><td>0</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html" target="_blank">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="https://arxiv.org/pdf/1212.0402.pdf" target="_blank">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td></td><td>64%</td><td>999</td><td>643</td><td>356</td><td>56</td><td>628</td><td>362</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html" target="_blank">UMB-DB: A database of partially occluded 3D faces</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td></td><td>66%</td><td>47</td><td>31</td><td>16</td><td>2</td><td>22</td><td>24</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html" target="_blank">UMDFaces: An annotated face dataset for training deep networks</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td>edu</td><td>University of Maryland</td><td>United 
States</td><td>39.28996850</td><td>-76.62196103</td><td>79%</td><td>42</td><td>33</td><td>9</td><td>2</td><td>30</td><td>11</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html" target="_blank">USED: a large-scale social event detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Trento</td><td>Italy</td><td>46.06588360</td><td>11.11598940</td><td>86%</td><td>7</td><td>6</td><td>1</td><td>0</td><td>3</td><td>4</td></tr><tr><td>d4f1eb008eb80595bcfdac368e23ae9754e1e745</td><td>uccs</td><td>UCCS</td><td><a href="papers/d4f1eb008eb80595bcfdac368e23ae9754e1e745.html" target="_blank">Unconstrained Face Detection and Open-Set Face Recognition Challenge</a></td><td><span class="gray">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>5</td><td>5</td><td>0</td><td>0</td><td>4</td><td>1</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>ufi</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html" target="_blank">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://home.zcu.cz/~pkral/papers/kral_micai15.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>12</td><td>6</td><td>6</td><td>0</td><td>4</td><td>6</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html" target="_blank">Understanding Kin Relationships in a Photo</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td></td><td>63%</td><td>94</td><td>59</td><td>35</td><td>1</td><td>33</td><td>61</td></tr><tr><td>5a4df9bef1872865f0b619ac3aacc97f49e4a035</td><td>cuhk_train_station</td><td>CUHK Train Station Dataset</td><td><a href="papers/5a4df9bef1872865f0b619ac3aacc97f49e4a035.html" target="_blank">Understanding collective crowd behaviors: Learning a Mixture model of Dynamic pedestrian-Agents</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>60%</td><td>141</td><td>84</td><td>57</td><td>4</td><td>60</td><td>75</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html" target="_blank">Understanding images of groups of people</a></td><td><span class="gray">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>15e1af79939dbf90790b03d8aa02477783fb1d0f</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/15e1af79939dbf90790b03d8aa02477783fb1d0f.html" target="_blank">Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro</a></td><td><a href="https://arxiv.org/pdf/1701.07717.pdf" target="_blank">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision 
(ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html" target="_blank">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><span class="gray">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td>edu</td><td>University of Notre Dame</td><td>United States</td><td>41.70456775</td><td>-86.23822026</td><td>63%</td><td>35</td><td>22</td><td>13</td><td>3</td><td>18</td><td>15</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html" target="_blank">VADANA: A dense dataset for facial image analysis</a></td><td><span class="gray">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td>edu</td><td>University of Delaware</td><td>United States</td><td>39.68103280</td><td>-75.75401840</td><td>67%</td><td>15</td><td>10</td><td>5</td><td>0</td><td>5</td><td>10</td></tr><tr><td>70c59dc3470ae867016f6ab0e008ac8ba03774a1</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/70c59dc3470ae867016f6ab0e008ac8ba03774a1.html" target="_blank">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="https://arxiv.org/pdf/1710.08092.pdf" target="_blank">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>83</td><td>66</td><td>17</td><td>3</td><td>61</td><td>20</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html" target="_blank">VQA: Visual Question Answering</a></td><td><span class="gray">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>erce</td><td>ERCe</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>b6c293f0420f7e945b5916ae44269fb53e139275</td><td>tisi</td><td>Times Square Intersection</td><td><a href="papers/b6c293f0420f7e945b5916ae44269fb53e139275.html" target="_blank">Video Synopsis by Heterogeneous Multi-source Correlation</a></td><td><span class="gray">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td></td><td>52%</td><td>29</td><td>15</td><td>14</td><td>2</td><td>14</td><td>13</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html" target="_blank">Violent flows: Real-time detection of violent crowd behavior</a></td><td><span class="gray">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition 
Workshops</td><td>edu</td><td>Open University of Israel</td><td>Israel</td><td>32.77824165</td><td>34.99565673</td><td>65%</td><td>88</td><td>57</td><td>31</td><td>6</td><td>45</td><td>44</td></tr><tr><td>026e3363b7f76b51cc711886597a44d5f1fd1de2</td><td>kitti</td><td>KITTI</td><td><a href="papers/026e3363b7f76b51cc711886597a44d5f1fd1de2.html" target="_blank">Vision meets robotics: The KITTI dataset</a></td><td><a href="https://pdfs.semanticscholar.org/026e/3363b7f76b51cc711886597a44d5f1fd1de2.pdf" target="_blank">[pdf]</a></td><td>I. J. Robotics Res.</td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>999</td><td>602</td><td>397</td><td>36</td><td>553</td><td>462</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html" target="_blank">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>168</td><td>91</td><td>77</td><td>10</td><td>85</td><td>79</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html" target="_blank">Visual Kinship Recognition of Families in the Wild</a></td><td><span class="gray">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>University of Massachusetts Dartmouth</td><td>United States</td><td>41.62772475</td><td>-71.00724501</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>2</td><td>3</td></tr><tr><td>8875ae233bc074f5cd6c4ebba447b536a7e847a5</td><td>voxceleb2</td><td>VoxCeleb2</td><td><a href="papers/8875ae233bc074f5cd6c4ebba447b536a7e847a5.html" target="_blank">VoxCeleb2: Deep Speaker Recognition.</a></td><td><a href="https://pdfs.semanticscholar.org/8875/ae233bc074f5cd6c4ebba447b536a7e847a5.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>71%</td><td>34</td><td>24</td><td>9</td><td>2</td><td>31</td><td>2</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html" target="_blank">WIDER FACE: A Face Detection Benchmark</a></td><td><span class="gray">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>China</td><td>22.41626320</td><td>114.21093180</td><td>66%</td><td>178</td><td>118</td><td>60</td><td>11</td><td>112</td><td>66</td></tr><tr><td>36bccfb2ad847096bc76777e544f305813cd8f5b</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/36bccfb2ad847096bc76777e544f305813cd8f5b.html" target="_blank">WILDTRACK: A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="http://openaccess.thecvf.com/content_cvpr_2018/Supplemental/1562-supp.pdf" target="_blank">[pdf]</a></td><td>2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td>WLFDB</td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html" target="_blank">WLFDB : Weakly Labeled Face Databases</a></td><td><a 
href="https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html" target="_blank">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://eprints.pascal-network.org/archive/00007964/01/eichner10eccv.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>65%</td><td>78</td><td>51</td><td>27</td><td>5</td><td>54</td><td>23</td></tr><tr><td>0c91808994a250d7be332400a534a9291ca3b60e</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/0c91808994a250d7be332400a534a9291ca3b60e.html" target="_blank">Weak Hypotheses and Boosting for Generic Object Detection and Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/0c91/808994a250d7be332400a534a9291ca3b60e.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>56%</td><td>236</td><td>131</td><td>105</td><td>17</td><td>161</td><td>77</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html" target="_blank">Web-based database for facial expression analysis</a></td><td><span class="gray">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>464</td><td>250</td><td>214</td><td>45</td><td>282</td><td>188</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html" target="_blank">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Kentucky</td><td>United States</td><td>38.03337420</td><td>-84.50177580</td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b62628ac06bbac998a3ab825324a41a11bc3a988</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/b62628ac06bbac998a3ab825324a41a11bc3a988.html" target="_blank">XM2VTSDB : The extended M2VTS database</a></td><td><a href="https://pdfs.semanticscholar.org/b626/28ac06bbac998a3ab825324a41a11bc3a988.pdf" target="_blank">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>62%</td><td>864</td><td>539</td><td>325</td><td>37</td><td>493</td><td>404</td></tr><tr><td>010f0f4929e6a6644fb01f0e43820f91d0fad292</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/010f0f4929e6a6644fb01f0e43820f91d0fad292.html" target="_blank">YFCC100M: the new data in multimedia research</a></td><td><span class="gray">[pdf]</a></td><td>Commun. 
ACM</td><td>edu</td><td>Carnegie Mellon University</td><td>United States</td><td>40.44416190</td><td>-79.94272826</td><td>64%</td><td>274</td><td>176</td><td>98</td><td>23</td><td>172</td><td>100</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html" target="_blank">YawDD: a yawning detection dataset</a></td><td><span class="gray">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td></td><td>80%</td><td>15</td><td>12</td><td>3</td><td>1</td><td>2</td><td>13</td></tr></table></body></html>
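The scraper/s2-final-report.py hunks below comment out the brainwash-only filter in s2_final_report(), re-enable the s3cmd upload of the verified citations directory, and split the old "S2 API missing verified citation" warning into three cases. A minimal sketch of that branching, using a hypothetical classify_verified_citation helper (the script itself prints the warnings inline inside process_paper):

# Sketch only: mirrors the membership checks added in process_paper();
# the helper name and return values are illustrative, not part of the script.
def classify_verified_citation(paper_id, aggregate_citations, unknown_citations):
    if paper_id in aggregate_citations:
        return 'geocoded'            # already resolved to an address, nothing to report
    if paper_id in unknown_citations:
        return 'needs_address'       # returned by the S2 API but not yet geocoded
    return 'missing_from_s2'         # not present in the S2 citation results at all

if __name__ == '__main__':
    aggregate = {'aaa': {}}          # example data, not real paper ids
    unknown = {'bbb': {}}
    for pid in ('aaa', 'bbb', 'ccc'):
        print(pid, classify_verified_citation(pid, aggregate, unknown))

The citations/brainwash.json payloads below (old and new versions of the same single-line file) share the shape {"id", "paper", "citations": [{"id", "title", "addresses": [...], "year", "pdf", "doi"}]}, where each address carries "name", "lat", "lng", "type", and "country". A minimal sketch, assuming a local checkout of the repository, of tallying citing institutions by country from that file:

# Sketch only: the path assumes the repository layout shown in the diff.
import json
from collections import Counter

with open('site/datasets/citations/brainwash.json', encoding='utf-8') as f:
    payload = json.load(f)

countries = Counter(
    addr.get('country', 'Unknown')
    for citation in payload['citations']
    for addr in citation.get('addresses', [])
)

for country, count in countries.most_common():
    print('{:4d}  {}'.format(count, country))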
\ No newline at end of file
diff --git a/scraper/s2-final-report.py b/scraper/s2-final-report.py
index c9795680..63789d85 100644
--- a/scraper/s2-final-report.py
+++ b/scraper/s2-final-report.py
@@ -22,8 +22,8 @@ def s2_final_report():
     verified_lookup, verified_totals = fetch_verified_paper_lookup()
     items = []
     for key, item in megapixels.items():
-        if key != 'brainwash':
-            continue
+        #if key != 'brainwash':
+        #    continue
         ft_share = 'ft_share' in item['dataset'] and item['dataset']['ft_share'] == 'Y'
         nyt_share = 'nyt_share' in item['dataset'] and item['dataset']['nyt_share'] == 'Y'
         if ft_share or nyt_share:
@@ -47,11 +47,11 @@ def s2_final_report():
    #     DIR_PUBLIC_CITATIONS + '/',
    #     "s3://megapixels/v1/citations/",
    # ])
-    #subprocess.call([
-    #    "s3cmd", "put", "-P", "--recursive",
-    #    DIR_VERIFIED_CITATIONS + '/',
-    #    "s3://megapixels/v1/citations/verified/",
-    #])
+    subprocess.call([
+        "s3cmd", "put", "-P", "--recursive",
+        DIR_VERIFIED_CITATIONS + '/',
+        "s3://megapixels/v1/citations/verified/",
+    ])

def process_paper(row, verified_lookup, verified_totals):
    aggregate_citations = {}
@@ -75,8 +75,12 @@ def process_paper(row, verified_lookup, verified_totals):
        process_single_paper(row, 'search', addresses, aggregate_citations, unknown_citations)

    for paper_id in verified_lookup.keys():
-        if paper_id not in aggregate_citations:
-            print('S2 API missing verified citation: {}'.format(paper_id))
+        if paper_id in aggregate_citations:
+            pass
+        elif paper_id in unknown_citations:
+            print('Verified paper needs address: {}'.format(paper_id))
+        else:
+            print('S2 API missing new verified citation: {}'.format(paper_id))

    process_single_paper(row, 'verified', addresses, aggregate_citations, unknown_citations, verified_lookup.keys())
diff --git a/site/datasets/citations/brainwash.json b/site/datasets/citations/brainwash.json
index 17db4acf..932e18ef 100644
--- a/site/datasets/citations/brainwash.json
+++ b/site/datasets/citations/brainwash.json
@@ -1 +1 @@
-{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "paper": {"key": "brainwash", "name": "Brainwash", "title": "End-to-End People Detection in Crowded Scenes", "year": "2016", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}]}, "citations": [{"id": "e35515f699b60472ac8f50d1da84fab3c55417d6", "title": "Key Parts Context and Scene Geometry in Human Head Detection", "addresses": [{"name": "Tsinghua University", "source_name": "Tsinghua University", "street_adddress": "\u6e05\u534e\u5927\u5b66, 30, \u53cc\u6e05\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "lat": "40.00229045", "lng": "116.32098908", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451832"]}, {"id": "5d5d267416aeb2bbddaf06f703e8683753abdcd0", "title": "Exploiting Multispectral and Contextual Information to Improve Human Detection", "addresses": [{"name": "State University of New Jersey", "source_name": "The State University of New Jersey", "street_adddress": "Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu", "country": "United States"}], "year": "2017", "pdf": 
["https://pdfs.semanticscholar.org/5d5d/267416aeb2bbddaf06f703e8683753abdcd0.pdf"], "doi": []}, {"id": "0cf0ad8235929417d904acd1c672713ca4fdb105", "title": "Fusion of Head and Full-Body Detectors for Multi-object Tracking", "addresses": [{"name": "Technical University Munich", "source_name": "Technical University Munich", "street_adddress": "TUM, 21, Arcisstra\u00dfe, Bezirksteil K\u00f6nigsplatz, Stadtbezirk 03 Maxvorstadt, M\u00fcnchen, Obb, Bayern, 80333, Deutschland", "lat": "48.14955455", "lng": "11.56775314", "type": "edu", "country": "Germany"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1705.08314.pdf"], "doi": []}, {"id": "cab97d4dc67919f965cc884d80e4d6b743a256eb", "title": "Scale Mapping and Dynamic Re-Detecting in Dense Head Detection", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451653"]}, {"id": "8a6665bfe3ad126d84414280ee2f884818560063", "title": "Vehicle Detection in Urban Traffic Surveillance Images Based on Convolutional Neural Networks with Feature Concatenation", "addresses": [{"name": "China University of Mining and Technology", "source_name": "China University of Mining and Technology", "street_adddress": "China University of Mining and Technology, 1\u53f7, \u5927\u5b66\u8def, \u6cc9\u5c71\u533a (Quanshan), \u5f90\u5dde\u5e02 / Xuzhou, \u6c5f\u82cf\u7701, 221116, \u4e2d\u56fd", "lat": "34.21525380", "lng": "117.13985410", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://pdfs.semanticscholar.org/8a66/65bfe3ad126d84414280ee2f884818560063.pdf"], "doi": []}, {"id": "333880ee776d57555f54935978ccb2ce13cfdb07", "title": "Person classification leveraging Convolutional Neural Network for obstacle avoidance via Unmanned Aerial Vehicles", "addresses": [{"name": "Cranfield University", "source_name": "Cranfield University, UK", "street_adddress": "College Rd, Wharley End, Bedford MK43 0AL, UK", "lat": "52.07418180", "lng": "-0.62781230", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8101662"]}, {"id": "f30bdc22fe454a28234cf858d5bf9de94701ee7a", "title": "Adaptive NMS: Refining Pedestrian Detection in a Crowd", "addresses": [{"name": "Beihang University", "source_name": "Beihang University", "street_adddress": "\u5317\u4eac\u822a\u7a7a\u822a\u5929\u5927\u5b66, 37, \u5b66\u9662\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100083, \u4e2d\u56fd", "lat": "39.98083330", "lng": "116.34101249", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.03629.pdf"], "doi": []}, {"id": "976647b32fd7e1a5c8ee4a11792155903bb34e43", "title": "Multi-Organ Plant Classification Based on Convolutional and Recurrent Neural Networks", "addresses": [{"name": "University of Malaya, Kuala Lumpur", "source_name": "University of Malaya", "street_adddress": "UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia", "lat": "3.12267405", "lng": "101.65356103", "type": "edu", "country": "Malaysia"}, {"name": "Kingston University", "source_name": 
"Kingston University", "street_adddress": "Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK", "lat": "51.42930860", "lng": "-0.26840440", "type": "edu", "country": "United Kingdom"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359391"]}, {"id": "66a08ff2ea7447093624632e7069e3da16961d30", "title": "An Incremental Framework for Video-Based Traffic Sign Detection, Tracking, and Recognition", "addresses": [{"name": "Northwestern Polytechnical University", "source_name": "Northwestern Polytechnical University", "street_adddress": "\u897f\u5317\u5de5\u4e1a\u5927\u5b66 \u53cb\u8c0a\u6821\u533a, 127\u53f7, \u53cb\u8c0a\u897f\u8def, \u957f\u5b89\u8def, \u7891\u6797\u533a (Beilin), \u897f\u5b89\u5e02, \u9655\u897f\u7701, 710072, \u4e2d\u56fd", "lat": "34.24691520", "lng": "108.91061982", "type": "edu", "country": "China"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7605450"]}, {"id": "6fc8c988dd841c6c4f5e96b1b1458b6aa564b2de", "title": "Crowd Counting via Scale-Adaptive Convolutional Neural Network", "addresses": [{"name": "Shanghai Jiaotong University", "source_name": "Shanghai Jiaotong University", "street_adddress": "China, Shanghai, Minhang, \u4e1c\u5ddd\u8def \u90ae\u653f\u7f16\u7801: 200240", "lat": "31.02522010", "lng": "121.43377840", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.04433.pdf"], "doi": []}, {"id": "e607b91f69ea2bff3194b07c5d22b4625bbe306e", "title": "Learning to See People Like People : Predicting Social Impressions of Faces", "addresses": [{"name": "Purdue University", "source_name": "Purdue University", "street_adddress": "Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/e607/b91f69ea2bff3194b07c5d22b4625bbe306e.pdf"], "doi": []}, {"id": "737f3cf354f40a6a7fd8a2058fe2803b8dd6c56b", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/737f/3cf354f40a6a7fd8a2058fe2803b8dd6c56b.pdf"], "doi": []}, {"id": "3e667c54e848233db092b794f2cfbf47ea63b771", "title": "Combined convolutional and recurrent neural networks for hierarchical classification of images", "addresses": [{"name": "Northwestern University", "source_name": "Northwestern University", "street_adddress": "Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA", "lat": "42.05511640", "lng": "-87.67581113", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.09574.pdf"], "doi": []}, {"id": "a5c8fc1ca4f06a344b53dc81ebc6d87f54896722", "title": "Learning to see people like people", "addresses": [{"name": "University of California, San Diego", "source_name": "University of California, San Diego", "street_adddress": "UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA", 
"lat": "32.87935255", "lng": "-117.23110049", "type": "edu", "country": "United States"}, {"name": "Purdue University", "source_name": "Purdue University", "street_adddress": "Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1705.04282.pdf"], "doi": []}, {"id": "7a9c2524a157a6b5d0204c2f3b187cd67eb26b4c", "title": "SINet: A Scale-Insensitive Convolutional Neural Network for Fast Vehicle Detection", "addresses": [{"name": "Hong Kong Polytechnic University", "source_name": "Hong Kong Polytechnic University", "street_adddress": "hong kong, 11, \u80b2\u624d\u9053 Yuk Choi Road, \u5c16\u6c99\u5480 Tsim Sha Tsui, \u6cb9\u5c16\u65fa\u5340 Yau Tsim Mong District, \u4e5d\u9f8d Kowloon, HK, 00000, \u4e2d\u56fd", "lat": "22.30457200", "lng": "114.17976285", "type": "edu", "country": "China"}, {"name": "Chinese University of Hong Kong", "source_name": "Chinese University of Hong Kong", "street_adddress": "Hong Kong, \u99ac\u6599\u6c34\u6c60\u65c1\u8def", "lat": "22.41626320", "lng": "114.21093180", "type": "edu", "country": "China"}, {"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2019", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8478157"]}, {"id": "c7f4e18ec3a805b8aee1d3d7552364c8f5f6ca1d", "title": "An Auto-adaptive CNN for Crowd Counting in Monitor Image", "addresses": [{"name": "Beijing University of Posts and Telecommunications", "source_name": "Beijing University of Posts and Telecommunications", "street_adddress": "\u5317\u4eac\u90ae\u7535\u5927\u5b66, \u897f\u571f\u57ce\u8def, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100082, \u4e2d\u56fd", "lat": "39.96014880", "lng": "116.35193921", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8525541"]}, {"id": "b1db174463b0bbc54a61fcc83acfb89ad3e3d18f", "title": "Loss Functions for Multiset Prediction", "addresses": [{"name": "New York University", "source_name": "New York University", "street_adddress": "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.05246.pdf"], "doi": []}, {"id": "81ba5202424906f64b77f68afca063658139fbb2", "title": "Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, {"name": "IDIAP Research Institute", "source_name": "IDIAP Research Institute", "street_adddress": "Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra", "lat": 
"46.10923700", "lng": "7.08453549", "type": "edu", "country": "Switzerland"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1611.09078.pdf"], "doi": []}, {"id": "830466596c0908399760f2009a09ce605e3121c9", "title": "Revisiting Perspective Information for Efficient Crowd Counting", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Tongji University", "source_name": "Tongji University", "street_adddress": "\u540c\u6d4e\u5927\u5b66, 1239, \u56db\u5e73\u8def, \u6c5f\u6e7e, \u8679\u53e3\u533a, \u4e0a\u6d77\u5e02, 200092, \u4e2d\u56fd", "lat": "31.28473925", "lng": "121.49694909", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.01989.pdf"], "doi": []}, {"id": "1d1a2387663cb3510d6f5c0651c1dc55e48dcabc", "title": "Joint Pedestrian and Body Part Detection via Semantic Relationship Learning", "addresses": [{"name": "Hebei University of Technology", "source_name": "Hebei University of Technology, Tianjin, P. R. China", "street_adddress": "8 Dingzigu 1st Rd, Hongqiao Qu, China, 300131", "lat": "39.17963500", "lng": "117.16588200", "type": "edu", "country": "China"}, {"name": "Chinese Academy of Sciences", "source_name": "Chinese Academy of Sciences", "street_adddress": "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "lat": "40.00447950", "lng": "116.37023800", "type": "edu", "country": "China"}, {"name": "Beijing Information Science and Technology University", "source_name": "Beijing Information Science and Technology University, Beijing, China", "street_adddress": "China, Beijing, Haidian, \u6e05\u6cb3\u56db\u62d4\u5b50", "lat": "40.04332040", "lng": "116.34181090", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://pdfs.semanticscholar.org/1d1a/2387663cb3510d6f5c0651c1dc55e48dcabc.pdf"], "doi": []}, {"id": "91d2f0b7c23239740fa15dff0b5893b992c0ab34", "title": "Sequence-to-Segment Networks for Segment Detection", "addresses": [{"name": "Stony Brook University", "source_name": "Stony Brook University", "street_adddress": "Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/91d2/f0b7c23239740fa15dff0b5893b992c0ab34.pdf"], "doi": []}, {"id": "8e7c647d8e8ba726b03f7e7c5cc395f86b9de9be", "title": "Parsing Pose of People with Interaction", "addresses": [{"name": "California Institute of Technology", "source_name": "California Institute of Technology", "street_adddress": "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/8e7c/647d8e8ba726b03f7e7c5cc395f86b9de9be.pdf"], "doi": []}, {"id": "2f02c1d4858d9c0f5d16099eb090560d5fa4f23f", "title": "Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of 
Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545068"]}, {"id": "c43c3ec39005aaf1f0ad2f641d92f41021342217", "title": "Pedestrian Detection with Autoregressive Network Phases", "addresses": [{"name": "Michigan State University", "source_name": "Michigan State University", "street_adddress": "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.00440.pdf"], "doi": []}, {"id": "e1a784e360f2bf3911cffa30506d46c412659db5", "title": "MetaAnchor: Learning to Detect Objects with Customized Anchors", "addresses": [{"name": "Fudan University", "source_name": "Fudan University", "street_adddress": "\u590d\u65e6\u5927\u5b66, 220, \u90af\u90f8\u8def, \u4e94\u89d2\u573a\u8857\u9053, \u6768\u6d66\u533a, \u4e0a\u6d77\u5e02, 200433, \u4e2d\u56fd", "lat": "31.30104395", "lng": "121.50045497", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.00980.pdf"], "doi": []}, {"id": "9da3ade53bf91556fe46828b216aab20a4e72294", "title": "SNc Neuron Detection Method Based on Deep Learning for Efficacy Evaluation of Anti-PD Drugs", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Shanghai Jiao Tong University", "source_name": "Shanghai Jiao Tong University", "street_adddress": "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\uff08\u5f90\u6c47\u6821\u533a\uff09, \u6dee\u6d77\u897f\u8def, \u756a\u79ba\u5c0f\u533a, \u5e73\u9634\u6865, \u5f90\u6c47\u533a, \u4e0a\u6d77\u5e02, 200052, \u4e2d\u56fd", "lat": "31.20081505", "lng": "121.42840681", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8431470"]}, {"id": "cfa48bc1015b88809e362b4da19fe4459acb1d89", "title": "Learning to Filter Object Detections", "addresses": [{"name": "Max Planck Institute for Intelligent Systems", "source_name": "Max Planck Institute for Intelligent Systems", "street_adddress": "Heisenbergstra\u00dfe 3, 70569 Stuttgart, Germany", "lat": "48.74689390", "lng": "9.08051410", "type": "edu", "country": "Germany"}, {"name": "Microsoft", "source_name": "Microsoft Corporation, Redmond, WA, USA", "street_adddress": "One Microsoft Way, Redmond, WA 98052, USA", "lat": "47.64233180", "lng": "-122.13693020", "type": "company", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/cfa4/8bc1015b88809e362b4da19fe4459acb1d89.pdf"], "doi": []}, {"id": "eb5780e49ad7e713e9f34cf067d45030fb29753e", "title": "Iterative fully convolutional neural networks for automatic vertebra segmentation and identification", "addresses": [{"name": "University Medical Center Utrecht", "source_name": "Rudolf Magnus Institute of Neuroscience, University Medical Center Utrecht, Utrecht, The Netherlands", 
"street_adddress": "Vondellaan 94, 3521 GH Utrecht, Netherlands", "lat": "52.07869500", "lng": "5.11974690", "type": "edu", "country": "Netherlands"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1804.04383.pdf"], "doi": []}, {"id": "54c9343c247ce5a82bc52f83df84c2bbae737ce4", "title": "Deep Learning on Attributed Graphs: A Journey from Graphs to Their Embeddings and Back. (L'apprentissage profond sur graphes attribu\u00e9s: Un voyage aller-retour aux plongements des graphes)", "addresses": [{"name": "INRIA Sophia Antipolis", "source_name": "INRIA Sophia Antipolis Meditérannée, Valbonne, France", "street_adddress": "2004 Route des Lucioles, 06902 Valbonne, France", "lat": "43.61581310", "lng": "7.06838000", "type": "edu", "country": "France"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1901.08296.pdf"], "doi": []}, {"id": "2d81cf3214281af85eb1d9d270a897d62302e88e", "title": "High density people estimation in video surveillance", "addresses": [{"name": "AvidBeam", "source_name": "AvidBeam", "street_adddress": "5 Wadi Al Nile, Maadi Al Khabiri Ash Sharqeyah, Al Maadi, Cairo Governorate 11728, Egypt", "lat": "29.95606300", "lng": "31.25547100", "type": "company", "country": "Egypt"}, {"name": "Faculty of Media Engineering & Technology German University in Cairo", "source_name": "Faculty of Media Engineering & Technology German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}, {"name": "German University in Cairo", "source_name": "Faculty of Media Engineering and Technology, German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8275348"]}, {"id": "d54d0dc1e7ef82f3ed0ee332e0777b6d73477a8c", "title": "Sidewalk-level People Flow Estimation Using Dashboard Cameras Based on Deep Learning", "addresses": [{"name": "Osaka University", "source_name": "Osaka University", "street_adddress": "\u5927\u962a\u5927\u5b66\u6e05\u660e\u5bee, \u670d\u90e8\u897f\u753a\u56db\u4e01\u76ee, \u8c4a\u4e2d\u5e02, \u5927\u962a\u5e9c, \u8fd1\u757f\u5730\u65b9, \u65e5\u672c", "lat": "34.80809035", "lng": "135.45785218", "type": "edu", "country": "Japan"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8653595"]}, {"id": "9607f88fe6298f754e3f610c3a587d8e10b1b0a9", "title": "Scene Text Detection with Recurrent Instance Segmentation", "addresses": [{"name": "National Laboratory of Pattern Recognition", "source_name": "National Laboratory of Pattern Recognition & Chinese Academy of Sciences & University of Chinese Academy of Sciences, Beijing, China", "street_adddress": "China, Beijing, Haidian, Zhongguancun South 1st Alley, \u4e2d\u5173\u6751\u5357\u4e00\u6761", "lat": "39.98177000", "lng": "116.33008600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545073"]}, {"id": "ed2ba6448db8cf945ca24d4df11916c2c5c3edd1", "title": "Rapid Pedestrian Detection Based on Deep Omega-Shape Features with Partial Occlusion Handing", "addresses": [{"name": "University of Electronic Science and Technology of China", "source_name": "University of Electronic Science and Technology of China", "street_adddress": "2 Jianshe North Rd 2nd Section, Jianshe Road, Chenghua Qu, Chengdu Shi, Sichuan Sheng, China, 
610054", "lat": "30.67272100", "lng": "104.09880600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://doi.org/10.1007/s11063-018-9837-1"]}, {"id": "deea683731f468c7234e1089f48c4546e7003b18", "title": "Multi-Task Vehicle Detection With Region-of-Interest Voting", "addresses": [{"name": "Alibaba Group, Hangzhou, China", "source_name": "Alibaba Group, Hangzhou, China", "street_adddress": "Alibaba Group, \u4e94\u5e38\u8857\u9053, \u4f59\u676d\u533a (Yuhang), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, \u4e2d\u56fd", "lat": "30.28106540", "lng": "120.02139087", "type": "edu", "country": "China"}, {"name": "Alibaba, Hangzhou, China", "source_name": "Alibaba, Hangzhou, China", "street_adddress": "699 Wangshang Rd, Binjiang Qu, Hangzhou Shi, Zhejiang Sheng, China", "lat": "30.18996400", "lng": "120.19210000", "type": "edu", "country": "China"}, {"name": "Zhejiang University", "source_name": "Zhejiang University", "street_adddress": "\u6d59\u6c5f\u5927\u5b66\u4e4b\u6c5f\u6821\u533a, \u4e4b\u6c5f\u8def, \u8f6c\u5858\u8857\u9053, \u897f\u6e56\u533a (Xihu), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, 310008, \u4e2d\u56fd", "lat": "30.19331415", "lng": "120.11930822", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8066331", "http://doi.org/10.1109/TIP.2017.2762591", "https://www.ncbi.nlm.nih.gov/pubmed/29028197", "https://www.wikidata.org/entity/Q50063923"]}, {"id": "1303b3e9f4bffb87b6dd34f7849f7a57e00253d1", "title": "Point in, Box out: Beyond Counting Persons in Crowds", "addresses": [{"name": "Sichuan University, Chengdu", "source_name": "Sichuan Univ., Chengdu", "street_adddress": "\u56db\u5ddd\u5927\u5b66\uff08\u534e\u897f\u6821\u533a\uff09, \u6821\u4e1c\u8def, \u6b66\u4faf\u533a, \u6b66\u4faf\u533a (Wuhou), \u6210\u90fd\u5e02 / Chengdu, \u56db\u5ddd\u7701, 610014, \u4e2d\u56fd", "lat": "30.64276900", "lng": "104.06751175", "type": "edu", "country": "China"}, {"name": "INRIA", "source_name": "INRIA Grenoble Rhone-Alpes, Grenoble, France", "street_adddress": "655 Avenue de l'Europe, 38330 Montbonnot-Saint-Martin, France", "lat": "45.21788600", "lng": "5.80736900", "type": "edu", "country": "France"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.01333.pdf"], "doi": []}, {"id": "4140ccbca5a585162420b82e9e4eeb9afbd1e7c2", "title": "Simulating Crowds in Egress Scenarios", "addresses": [{"name": "Pontifical Catholic University of Rio Grande do Sul", "source_name": "Pontifical Catholic University of Rio Grande do Sul", "street_adddress": "Av. Ipiranga, 6681 - Partenon, Porto Alegre - RS, 90619-900, Brazil", "lat": "-30.05934460", "lng": "-51.17349120", "type": "edu", "country": "Brazil"}, {"name": "University of Pennsylvania", "source_name": "University of Pennsylvania", "street_adddress": "Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA", "lat": "39.94923440", "lng": "-75.19198985", "type": "edu", "country": "United States"}, {"name": "Federal University of Rio Grande do Sul", "source_name": "Institute of Informatics, Federal University of Rio Grande do Sul, Porto Alegre, Brazil", "street_adddress": "Av. 
Paulo Gama, 110 - Farroupilha, Porto Alegre - RS, 90040-060, Brazil", "lat": "-30.03382480", "lng": "-51.21882800", "type": "edu", "country": "Brazil"}], "year": "2017", "pdf": [], "doi": ["http://doi.org/10.1007/978-3-319-65202-3"]}, {"id": "5031a110219231ceaa820725c6e77f87f7b2fde2", "title": "Training with Confusion for Fine-Grained Visual Classification", "addresses": [{"name": "Harvard University", "source_name": "Harvard University", "street_adddress": "Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu", "country": "United States"}, {"name": "MIT", "source_name": "Massachusetts Institute", "street_adddress": "MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu", "country": "United States"}, {"name": "Brigham Young University", "source_name": "Brigham Young University, Provo, USA", "street_adddress": "Provo, UT 84602, USA", "lat": "40.25184350", "lng": "-111.64931560", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/5031/a110219231ceaa820725c6e77f87f7b2fde2.pdf"], "doi": []}, {"id": "86c158ef6caaf247d5d14e07c5edded0147df8b7", "title": "Spatial Memory for Context Reasoning in Object Detection", "addresses": [{"name": "Carnegie Mellon University", "source_name": "Carnegie Mellon University Pittsburgh, PA - 15213, USA", "street_adddress": "Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA", "lat": "40.44416190", "lng": "-79.94272826", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1704.04224.pdf"], "doi": []}, {"id": "bba153ebdf11e6fb8716e35749c671ac96c14176", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1706.03686.pdf"], "doi": []}, {"id": "fe5679c183cb432894d111bf02cc3243c89762ca", "title": "Adaptive Scenario Discovery for Crowd Counting", "addresses": [{"name": "East China Normal University", "source_name": "East China Normal University", "street_adddress": "\u534e\u4e1c\u5e08\u8303\u5927\u5b66, 3663, \u4e2d\u5c71\u5317\u8def, \u66f9\u5bb6\u6e21, \u666e\u9640\u533a, \u666e\u9640\u533a (Putuo), \u4e0a\u6d77\u5e02, 200062, \u4e2d\u56fd", "lat": "31.22849230", "lng": "121.40211389", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.02393.pdf"], "doi": []}, {"id": "11824658170994e4d4655e8f688bace16a0d3e48", "title": "Multi-person Head Segmentation in Low Resolution Crowd Scenes Using Convolutional Encoder-Decoder Framework", "addresses": [{"name": "Qatar University", "source_name": "Qatar University", "street_adddress": "Qatar University, Roindabout 3, Al Tarfa (68), \u0623\u0645 \u0635\u0644\u0627\u0644, 24685, \u200f\u0642\u0637\u0631\u200e", "lat": "25.37461295", "lng": "51.48980354", "type": "edu", "country": "Qatar"}, {"name": "University of Warwick", "source_name": "University of Warwick", "street_adddress": "University of Warwick, University Road, Kirby Corner, Cannon 
Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK", "lat": "52.37931310", "lng": "-1.56042520", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/1182/4658170994e4d4655e8f688bace16a0d3e48.pdf"], "doi": []}, {"id": "061356704ec86334dbbc073985375fe13cd39088", "title": "Very Deep Convolutional Networks for Large-Scale Image Recognition", "addresses": [{"name": "University of Oxford", "source_name": "University of Oxford", "street_adddress": "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu", "country": "United Kingdom"}], "year": "2015", "pdf": ["https://arxiv.org/pdf/1409.1556.pdf"], "doi": []}, {"id": "14318685b5959b51d0f1e3db34643eb2855dc6d9", "title": "Going deeper with convolutions", "addresses": [{"name": "Google", "source_name": "Google, Inc.", "street_adddress": "1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA", "lat": "37.42199990", "lng": "-122.08405750", "type": "company", "country": "United States"}, {"name": "University of Michigan", "source_name": "University of Michigan", "street_adddress": "University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu", "country": "United States"}, {"name": "University of North Carolina", "source_name": "University of North Carolina", "street_adddress": "University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA", "lat": "35.90503535", "lng": "-79.04775327", "type": "edu", "country": "United States"}], "year": "2015", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298594"]}, {"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "title": "End-to-End People Detection in Crowded Scenes", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780624"]}, {"id": "7e915bb8e4ada4f8d261bc855a4f587ea97764ca", "title": "People detection in crowded scenes via regional-based convolutional network", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7877809", "http://doi.org/10.1109/ICSP.2016.7877809"]}, {"id": "591a4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b", "title": "A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": 
"2017", "pdf": ["https://pdfs.semanticscholar.org/591a/4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b.pdf"], "doi": []}, {"id": "b02d31c640b0a31fb18c4f170d841d8e21ffb66c", "title": "Localized region context and object feature fusion for people head detection", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532426", "http://doi.org/10.1109/ICIP.2016.7532426"]}, {"id": "68ea88440fc48d59c7407e71a193ff1973f9ba7c", "title": "Shoulder Keypoint-Detection from Object Detection", "addresses": [{"name": "University of Ottawa", "source_name": "University of Ottawa", "street_adddress": "University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu", "country": "Canada"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/68ea/88440fc48d59c7407e71a193ff1973f9ba7c.pdf"], "doi": []}]}
\ No newline at end of file +{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "paper": {"key": "brainwash", "name": "Brainwash", "title": "End-to-End People Detection in Crowded Scenes", "year": "2016", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}]}, "citations": [{"id": "e35515f699b60472ac8f50d1da84fab3c55417d6", "title": "Key Parts Context and Scene Geometry in Human Head Detection", "addresses": [{"name": "Tsinghua University", "source_name": "Tsinghua University", "street_adddress": "\u6e05\u534e\u5927\u5b66, 30, \u53cc\u6e05\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "lat": "40.00229045", "lng": "116.32098908", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451832"]}, {"id": "5d5d267416aeb2bbddaf06f703e8683753abdcd0", "title": "Exploiting Multispectral and Contextual Information to Improve Human Detection", "addresses": [{"name": "State University of New Jersey", "source_name": "The State University of New Jersey", "street_adddress": "Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/5d5d/267416aeb2bbddaf06f703e8683753abdcd0.pdf"], "doi": []}, {"id": "0cf0ad8235929417d904acd1c672713ca4fdb105", "title": "Fusion of Head and Full-Body Detectors for Multi-object Tracking", "addresses": [{"name": "Technical University Munich", "source_name": "Technical University Munich", "street_adddress": "TUM, 21, Arcisstra\u00dfe, Bezirksteil K\u00f6nigsplatz, Stadtbezirk 03 Maxvorstadt, M\u00fcnchen, Obb, Bayern, 80333, Deutschland", "lat": "48.14955455", "lng": "11.56775314", "type": "edu", "country": "Germany"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1705.08314.pdf"], "doi": []}, {"id": "cab97d4dc67919f965cc884d80e4d6b743a256eb", "title": "Scale Mapping and Dynamic Re-Detecting in Dense Head Detection", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451653"]}, {"id": "8a6665bfe3ad126d84414280ee2f884818560063", "title": "Vehicle Detection in Urban Traffic Surveillance Images Based on Convolutional Neural Networks with Feature Concatenation", "addresses": [{"name": "China University of Mining and Technology", "source_name": "China University of Mining and Technology", "street_adddress": "China University of Mining and Technology, 1\u53f7, \u5927\u5b66\u8def, \u6cc9\u5c71\u533a (Quanshan), \u5f90\u5dde\u5e02 / Xuzhou, \u6c5f\u82cf\u7701, 221116, \u4e2d\u56fd", "lat": "34.21525380", "lng": "117.13985410", "type": "edu", "country": "China"}], "year": "2019", "pdf": 
["https://pdfs.semanticscholar.org/8a66/65bfe3ad126d84414280ee2f884818560063.pdf"], "doi": []}, {"id": "333880ee776d57555f54935978ccb2ce13cfdb07", "title": "Person classification leveraging Convolutional Neural Network for obstacle avoidance via Unmanned Aerial Vehicles", "addresses": [{"name": "Cranfield University", "source_name": "Cranfield University, UK", "street_adddress": "College Rd, Wharley End, Bedford MK43 0AL, UK", "lat": "52.07418180", "lng": "-0.62781230", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8101662"]}, {"id": "f30bdc22fe454a28234cf858d5bf9de94701ee7a", "title": "Adaptive NMS: Refining Pedestrian Detection in a Crowd", "addresses": [{"name": "Beihang University", "source_name": "Beihang University", "street_adddress": "\u5317\u4eac\u822a\u7a7a\u822a\u5929\u5927\u5b66, 37, \u5b66\u9662\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100083, \u4e2d\u56fd", "lat": "39.98083330", "lng": "116.34101249", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.03629.pdf"], "doi": []}, {"id": "976647b32fd7e1a5c8ee4a11792155903bb34e43", "title": "Multi-Organ Plant Classification Based on Convolutional and Recurrent Neural Networks", "addresses": [{"name": "University of Malaya, Kuala Lumpur", "source_name": "University of Malaya", "street_adddress": "UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia", "lat": "3.12267405", "lng": "101.65356103", "type": "edu", "country": "Malaysia"}, {"name": "Kingston University", "source_name": "Kingston University", "street_adddress": "Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK", "lat": "51.42930860", "lng": "-0.26840440", "type": "edu", "country": "United Kingdom"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359391"]}, {"id": "66a08ff2ea7447093624632e7069e3da16961d30", "title": "An Incremental Framework for Video-Based Traffic Sign Detection, Tracking, and Recognition", "addresses": [{"name": "Northwestern Polytechnical University", "source_name": "Northwestern Polytechnical University", "street_adddress": "\u897f\u5317\u5de5\u4e1a\u5927\u5b66 \u53cb\u8c0a\u6821\u533a, 127\u53f7, \u53cb\u8c0a\u897f\u8def, \u957f\u5b89\u8def, \u7891\u6797\u533a (Beilin), \u897f\u5b89\u5e02, \u9655\u897f\u7701, 710072, \u4e2d\u56fd", "lat": "34.24691520", "lng": "108.91061982", "type": "edu", "country": "China"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7605450"]}, {"id": "6fc8c988dd841c6c4f5e96b1b1458b6aa564b2de", "title": "Crowd Counting via Scale-Adaptive Convolutional Neural Network", "addresses": [{"name": "Shanghai Jiaotong University", "source_name": "Shanghai Jiaotong University", "street_adddress": "China, Shanghai, Minhang, \u4e1c\u5ddd\u8def \u90ae\u653f\u7f16\u7801: 200240", "lat": "31.02522010", "lng": "121.43377840", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.04433.pdf"], "doi": []}, {"id": "e607b91f69ea2bff3194b07c5d22b4625bbe306e", "title": "Learning to See People Like People : Predicting Social Impressions of Faces", "addresses": [{"name": "Purdue University", "source_name": "Purdue University", "street_adddress": "Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": 
"edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/e607/b91f69ea2bff3194b07c5d22b4625bbe306e.pdf"], "doi": []}, {"id": "737f3cf354f40a6a7fd8a2058fe2803b8dd6c56b", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/737f/3cf354f40a6a7fd8a2058fe2803b8dd6c56b.pdf"], "doi": []}, {"id": "3e667c54e848233db092b794f2cfbf47ea63b771", "title": "Combined convolutional and recurrent neural networks for hierarchical classification of images", "addresses": [{"name": "Northwestern University", "source_name": "Northwestern University", "street_adddress": "Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA", "lat": "42.05511640", "lng": "-87.67581113", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.09574.pdf"], "doi": []}, {"id": "a5c8fc1ca4f06a344b53dc81ebc6d87f54896722", "title": "Learning to see people like people", "addresses": [{"name": "University of California, San Diego", "source_name": "University of California, San Diego", "street_adddress": "UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu", "country": "United States"}, {"name": "Purdue University", "source_name": "Purdue University", "street_adddress": "Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1705.04282.pdf"], "doi": []}, {"id": "7a9c2524a157a6b5d0204c2f3b187cd67eb26b4c", "title": "SINet: A Scale-Insensitive Convolutional Neural Network for Fast Vehicle Detection", "addresses": [{"name": "Hong Kong Polytechnic University", "source_name": "Hong Kong Polytechnic University", "street_adddress": "hong kong, 11, \u80b2\u624d\u9053 Yuk Choi Road, \u5c16\u6c99\u5480 Tsim Sha Tsui, \u6cb9\u5c16\u65fa\u5340 Yau Tsim Mong District, \u4e5d\u9f8d Kowloon, HK, 00000, \u4e2d\u56fd", "lat": "22.30457200", "lng": "114.17976285", "type": "edu", "country": "China"}, {"name": "Chinese University of Hong Kong", "source_name": "Chinese University of Hong Kong", "street_adddress": "Hong Kong, \u99ac\u6599\u6c34\u6c60\u65c1\u8def", "lat": "22.41626320", "lng": "114.21093180", "type": "edu", "country": "China"}, {"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2019", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8478157"]}, {"id": "c7f4e18ec3a805b8aee1d3d7552364c8f5f6ca1d", "title": "An Auto-adaptive CNN for Crowd Counting in Monitor Image", "addresses": [{"name": "Beijing University of Posts and 
Telecommunications", "source_name": "Beijing University of Posts and Telecommunications", "street_adddress": "\u5317\u4eac\u90ae\u7535\u5927\u5b66, \u897f\u571f\u57ce\u8def, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100082, \u4e2d\u56fd", "lat": "39.96014880", "lng": "116.35193921", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8525541"]}, {"id": "b1db174463b0bbc54a61fcc83acfb89ad3e3d18f", "title": "Loss Functions for Multiset Prediction", "addresses": [{"name": "New York University", "source_name": "New York University", "street_adddress": "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.05246.pdf"], "doi": []}, {"id": "81ba5202424906f64b77f68afca063658139fbb2", "title": "Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, {"name": "IDIAP Research Institute", "source_name": "IDIAP Research Institute", "street_adddress": "Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra", "lat": "46.10923700", "lng": "7.08453549", "type": "edu", "country": "Switzerland"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1611.09078.pdf"], "doi": []}, {"id": "d488dad9fa81817c85a284b09ebf198bf6b640f9", "title": "FCHD: A fast and accurate head detector", "addresses": [{"name": "IIT Madras, India", "source_name": "IIT Madras, India", "street_adddress": "Indian Institute Of Technology, Chennai, Tamil Nadu 600036, India", "lat": "12.99149290", "lng": "80.23369070", "type": "edu", "country": "India"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.08766.pdf"], "doi": []}, {"id": "830466596c0908399760f2009a09ce605e3121c9", "title": "Revisiting Perspective Information for Efficient Crowd Counting", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Tongji University", "source_name": "Tongji University", "street_adddress": "\u540c\u6d4e\u5927\u5b66, 1239, \u56db\u5e73\u8def, \u6c5f\u6e7e, \u8679\u53e3\u533a, \u4e0a\u6d77\u5e02, 200092, \u4e2d\u56fd", "lat": "31.28473925", "lng": "121.49694909", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.01989.pdf"], "doi": []}, {"id": "1d1a2387663cb3510d6f5c0651c1dc55e48dcabc", "title": "Joint Pedestrian and Body Part Detection via Semantic Relationship Learning", "addresses": [{"name": "Hebei University of Technology", "source_name": "Hebei University of Technology, Tianjin, P. R. 
China", "street_adddress": "8 Dingzigu 1st Rd, Hongqiao Qu, China, 300131", "lat": "39.17963500", "lng": "117.16588200", "type": "edu", "country": "China"}, {"name": "Chinese Academy of Sciences", "source_name": "Chinese Academy of Sciences", "street_adddress": "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "lat": "40.00447950", "lng": "116.37023800", "type": "edu", "country": "China"}, {"name": "Beijing Information Science and Technology University", "source_name": "Beijing Information Science and Technology University, Beijing, China", "street_adddress": "China, Beijing, Haidian, \u6e05\u6cb3\u56db\u62d4\u5b50", "lat": "40.04332040", "lng": "116.34181090", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://pdfs.semanticscholar.org/1d1a/2387663cb3510d6f5c0651c1dc55e48dcabc.pdf"], "doi": []}, {"id": "91d2f0b7c23239740fa15dff0b5893b992c0ab34", "title": "Sequence-to-Segment Networks for Segment Detection", "addresses": [{"name": "Stony Brook University", "source_name": "Stony Brook University", "street_adddress": "Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/91d2/f0b7c23239740fa15dff0b5893b992c0ab34.pdf"], "doi": []}, {"id": "8e7c647d8e8ba726b03f7e7c5cc395f86b9de9be", "title": "Parsing Pose of People with Interaction", "addresses": [{"name": "California Institute of Technology", "source_name": "California Institute of Technology", "street_adddress": "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/8e7c/647d8e8ba726b03f7e7c5cc395f86b9de9be.pdf"], "doi": []}, {"id": "2f02c1d4858d9c0f5d16099eb090560d5fa4f23f", "title": "Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545068"]}, {"id": "c43c3ec39005aaf1f0ad2f641d92f41021342217", "title": "Pedestrian Detection with Autoregressive Network Phases", "addresses": [{"name": "Michigan State University", "source_name": "Michigan State University", "street_adddress": "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.00440.pdf"], "doi": []}, {"id": "e1a784e360f2bf3911cffa30506d46c412659db5", "title": "MetaAnchor: Learning to Detect Objects with Customized Anchors", "addresses": [{"name": "Fudan University", "source_name": "Fudan University", "street_adddress": "\u590d\u65e6\u5927\u5b66, 220, \u90af\u90f8\u8def, \u4e94\u89d2\u573a\u8857\u9053, \u6768\u6d66\u533a, 
\u4e0a\u6d77\u5e02, 200433, \u4e2d\u56fd", "lat": "31.30104395", "lng": "121.50045497", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.00980.pdf"], "doi": []}, {"id": "9043df1de4f6e181875011c1379d1a7f68a28d6c", "title": "People Detection from Overhead Cameras", "addresses": [{"name": "Delft University of Technology", "source_name": "Delft University of Technology", "street_adddress": "TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland", "lat": "51.99882735", "lng": "4.37396037", "type": "edu", "country": "Netherlands"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/9043/df1de4f6e181875011c1379d1a7f68a28d6c.pdf"], "doi": []}, {"id": "9da3ade53bf91556fe46828b216aab20a4e72294", "title": "SNc Neuron Detection Method Based on Deep Learning for Efficacy Evaluation of Anti-PD Drugs", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Shanghai Jiao Tong University", "source_name": "Shanghai Jiao Tong University", "street_adddress": "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\uff08\u5f90\u6c47\u6821\u533a\uff09, \u6dee\u6d77\u897f\u8def, \u756a\u79ba\u5c0f\u533a, \u5e73\u9634\u6865, \u5f90\u6c47\u533a, \u4e0a\u6d77\u5e02, 200052, \u4e2d\u56fd", "lat": "31.20081505", "lng": "121.42840681", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8431470"]}, {"id": "cfa48bc1015b88809e362b4da19fe4459acb1d89", "title": "Learning to Filter Object Detections", "addresses": [{"name": "Max Planck Institute for Intelligent Systems", "source_name": "Max Planck Institute for Intelligent Systems", "street_adddress": "Heisenbergstra\u00dfe 3, 70569 Stuttgart, Germany", "lat": "48.74689390", "lng": "9.08051410", "type": "edu", "country": "Germany"}, {"name": "Microsoft", "source_name": "Microsoft Corporation, Redmond, WA, USA", "street_adddress": "One Microsoft Way, Redmond, WA 98052, USA", "lat": "47.64233180", "lng": "-122.13693020", "type": "company", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/cfa4/8bc1015b88809e362b4da19fe4459acb1d89.pdf"], "doi": []}, {"id": "eb5780e49ad7e713e9f34cf067d45030fb29753e", "title": "Iterative fully convolutional neural networks for automatic vertebra segmentation and identification", "addresses": [{"name": "University Medical Center Utrecht", "source_name": "Rudolf Magnus Institute of Neuroscience, University Medical Center Utrecht, Utrecht, The Netherlands", "street_adddress": "Vondellaan 94, 3521 GH Utrecht, Netherlands", "lat": "52.07869500", "lng": "5.11974690", "type": "edu", "country": "Netherlands"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1804.04383.pdf"], "doi": []}, {"id": "54c9343c247ce5a82bc52f83df84c2bbae737ce4", "title": "Deep Learning on Attributed Graphs: A Journey from Graphs to Their Embeddings and Back. 
(L'apprentissage profond sur graphes attribu\u00e9s: Un voyage aller-retour aux plongements des graphes)", "addresses": [{"name": "INRIA Sophia Antipolis", "source_name": "INRIA Sophia Antipolis Meditérannée, Valbonne, France", "street_adddress": "2004 Route des Lucioles, 06902 Valbonne, France", "lat": "43.61581310", "lng": "7.06838000", "type": "edu", "country": "France"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1901.08296.pdf"], "doi": []}, {"id": "2d81cf3214281af85eb1d9d270a897d62302e88e", "title": "High density people estimation in video surveillance", "addresses": [{"name": "AvidBeam", "source_name": "AvidBeam", "street_adddress": "5 Wadi Al Nile, Maadi Al Khabiri Ash Sharqeyah, Al Maadi, Cairo Governorate 11728, Egypt", "lat": "29.95606300", "lng": "31.25547100", "type": "company", "country": "Egypt"}, {"name": "Faculty of Media Engineering & Technology German University in Cairo", "source_name": "Faculty of Media Engineering & Technology German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}, {"name": "German University in Cairo", "source_name": "Faculty of Media Engineering and Technology, German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8275348"]}, {"id": "d54d0dc1e7ef82f3ed0ee332e0777b6d73477a8c", "title": "Sidewalk-level People Flow Estimation Using Dashboard Cameras Based on Deep Learning", "addresses": [{"name": "Osaka University", "source_name": "Osaka University", "street_adddress": "\u5927\u962a\u5927\u5b66\u6e05\u660e\u5bee, \u670d\u90e8\u897f\u753a\u56db\u4e01\u76ee, \u8c4a\u4e2d\u5e02, \u5927\u962a\u5e9c, \u8fd1\u757f\u5730\u65b9, \u65e5\u672c", "lat": "34.80809035", "lng": "135.45785218", "type": "edu", "country": "Japan"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8653595"]}, {"id": "9607f88fe6298f754e3f610c3a587d8e10b1b0a9", "title": "Scene Text Detection with Recurrent Instance Segmentation", "addresses": [{"name": "National Laboratory of Pattern Recognition", "source_name": "National Laboratory of Pattern Recognition & Chinese Academy of Sciences & University of Chinese Academy of Sciences, Beijing, China", "street_adddress": "China, Beijing, Haidian, Zhongguancun South 1st Alley, \u4e2d\u5173\u6751\u5357\u4e00\u6761", "lat": "39.98177000", "lng": "116.33008600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545073"]}, {"id": "ed2ba6448db8cf945ca24d4df11916c2c5c3edd1", "title": "Rapid Pedestrian Detection Based on Deep Omega-Shape Features with Partial Occlusion Handing", "addresses": [{"name": "University of Electronic Science and Technology of China", "source_name": "University of Electronic Science and Technology of China", "street_adddress": "2 Jianshe North Rd 2nd Section, Jianshe Road, Chenghua Qu, Chengdu Shi, Sichuan Sheng, China, 610054", "lat": "30.67272100", "lng": "104.09880600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://doi.org/10.1007/s11063-018-9837-1"]}, {"id": "03a65d274dc6caea94f6ab344e0b4969575327e3", "title": "CrowdHuman: A Benchmark for Detecting Human in a Crowd", "addresses": [{"name": "Megvii Inc. (Face++), China", "source_name": "Megvii Inc. 
(Face++), China", "street_adddress": "China", "lat": "35.86166000", "lng": "104.19539700", "type": "company", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1805.00123.pdf"], "doi": []}, {"id": "d78b190f98f9630cab261eabc399733af052f05c", "title": "Unsupervised Deep Domain Adaptation for Pedestrian Detection", "addresses": [{"name": "Shanghai Jiao Tong University", "source_name": "Shanghai Jiao Tong University", "street_adddress": "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\uff08\u5f90\u6c47\u6821\u533a\uff09, \u6dee\u6d77\u897f\u8def, \u756a\u79ba\u5c0f\u533a, \u5e73\u9634\u6865, \u5f90\u6c47\u533a, \u4e0a\u6d77\u5e02, 200052, \u4e2d\u56fd", "lat": "31.20081505", "lng": "121.42840681", "type": "edu", "country": "China"}, {"name": "University of Twente", "source_name": "University of Twente", "street_adddress": "University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland", "lat": "52.23801390", "lng": "6.85667610", "type": "edu", "country": "Netherlands"}], "year": "2016", "pdf": ["https://arxiv.org/pdf/1802.03269.pdf"], "doi": []}, {"id": "deea683731f468c7234e1089f48c4546e7003b18", "title": "Multi-Task Vehicle Detection With Region-of-Interest Voting", "addresses": [{"name": "Alibaba Group, Hangzhou, China", "source_name": "Alibaba Group, Hangzhou, China", "street_adddress": "Alibaba Group, \u4e94\u5e38\u8857\u9053, \u4f59\u676d\u533a (Yuhang), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, \u4e2d\u56fd", "lat": "30.28106540", "lng": "120.02139087", "type": "edu", "country": "China"}, {"name": "Alibaba, Hangzhou, China", "source_name": "Alibaba, Hangzhou, China", "street_adddress": "699 Wangshang Rd, Binjiang Qu, Hangzhou Shi, Zhejiang Sheng, China", "lat": "30.18996400", "lng": "120.19210000", "type": "edu", "country": "China"}, {"name": "Zhejiang University", "source_name": "Zhejiang University", "street_adddress": "\u6d59\u6c5f\u5927\u5b66\u4e4b\u6c5f\u6821\u533a, \u4e4b\u6c5f\u8def, \u8f6c\u5858\u8857\u9053, \u897f\u6e56\u533a (Xihu), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, 310008, \u4e2d\u56fd", "lat": "30.19331415", "lng": "120.11930822", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8066331", "http://doi.org/10.1109/TIP.2017.2762591", "https://www.ncbi.nlm.nih.gov/pubmed/29028197", "https://www.wikidata.org/entity/Q50063923"]}, {"id": "1303b3e9f4bffb87b6dd34f7849f7a57e00253d1", "title": "Point in, Box out: Beyond Counting Persons in Crowds", "addresses": [{"name": "Sichuan University, Chengdu", "source_name": "Sichuan Univ., Chengdu", "street_adddress": "\u56db\u5ddd\u5927\u5b66\uff08\u534e\u897f\u6821\u533a\uff09, \u6821\u4e1c\u8def, \u6b66\u4faf\u533a, \u6b66\u4faf\u533a (Wuhou), \u6210\u90fd\u5e02 / Chengdu, \u56db\u5ddd\u7701, 610014, \u4e2d\u56fd", "lat": "30.64276900", "lng": "104.06751175", "type": "edu", "country": "China"}, {"name": "INRIA", "source_name": "INRIA Grenoble Rhone-Alpes, Grenoble, France", "street_adddress": "655 Avenue de l'Europe, 38330 Montbonnot-Saint-Martin, France", "lat": "45.21788600", "lng": "5.80736900", "type": "edu", "country": "France"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.01333.pdf"], "doi": []}, {"id": "4140ccbca5a585162420b82e9e4eeb9afbd1e7c2", "title": "Simulating Crowds in Egress Scenarios", "addresses": [{"name": "Pontifical Catholic University of Rio Grande do Sul", "source_name": "Pontifical Catholic University of Rio Grande do Sul", "street_adddress": "Av. 
Ipiranga, 6681 - Partenon, Porto Alegre - RS, 90619-900, Brazil", "lat": "-30.05934460", "lng": "-51.17349120", "type": "edu", "country": "Brazil"}, {"name": "University of Pennsylvania", "source_name": "University of Pennsylvania", "street_adddress": "Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA", "lat": "39.94923440", "lng": "-75.19198985", "type": "edu", "country": "United States"}, {"name": "Federal University of Rio Grande do Sul", "source_name": "Institute of Informatics, Federal University of Rio Grande do Sul, Porto Alegre, Brazil", "street_adddress": "Av. Paulo Gama, 110 - Farroupilha, Porto Alegre - RS, 90040-060, Brazil", "lat": "-30.03382480", "lng": "-51.21882800", "type": "edu", "country": "Brazil"}], "year": "2017", "pdf": [], "doi": ["http://doi.org/10.1007/978-3-319-65202-3"]}, {"id": "5031a110219231ceaa820725c6e77f87f7b2fde2", "title": "Training with Confusion for Fine-Grained Visual Classification", "addresses": [{"name": "Harvard University", "source_name": "Harvard University", "street_adddress": "Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu", "country": "United States"}, {"name": "MIT", "source_name": "Massachusetts Institute", "street_adddress": "MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu", "country": "United States"}, {"name": "Brigham Young University", "source_name": "Brigham Young University, Provo, USA", "street_adddress": "Provo, UT 84602, USA", "lat": "40.25184350", "lng": "-111.64931560", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/5031/a110219231ceaa820725c6e77f87f7b2fde2.pdf"], "doi": []}, {"id": "86c158ef6caaf247d5d14e07c5edded0147df8b7", "title": "Spatial Memory for Context Reasoning in Object Detection", "addresses": [{"name": "Carnegie Mellon University", "source_name": "Carnegie Mellon University Pittsburgh, PA - 15213, USA", "street_adddress": "Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA", "lat": "40.44416190", "lng": "-79.94272826", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1704.04224.pdf"], "doi": []}, {"id": "bba153ebdf11e6fb8716e35749c671ac96c14176", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1706.03686.pdf"], "doi": []}, {"id": "fe5679c183cb432894d111bf02cc3243c89762ca", "title": "Adaptive Scenario Discovery for Crowd Counting", "addresses": [{"name": "East China Normal University", "source_name": "East China Normal University", "street_adddress": "\u534e\u4e1c\u5e08\u8303\u5927\u5b66, 3663, \u4e2d\u5c71\u5317\u8def, \u66f9\u5bb6\u6e21, \u666e\u9640\u533a, \u666e\u9640\u533a (Putuo), \u4e0a\u6d77\u5e02, 200062, \u4e2d\u56fd", "lat": "31.22849230", "lng": "121.40211389", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.02393.pdf"], "doi": 
[]}, {"id": "11824658170994e4d4655e8f688bace16a0d3e48", "title": "Multi-person Head Segmentation in Low Resolution Crowd Scenes Using Convolutional Encoder-Decoder Framework", "addresses": [{"name": "Qatar University", "source_name": "Qatar University", "street_adddress": "Qatar University, Roindabout 3, Al Tarfa (68), \u0623\u0645 \u0635\u0644\u0627\u0644, 24685, \u200f\u0642\u0637\u0631\u200e", "lat": "25.37461295", "lng": "51.48980354", "type": "edu", "country": "Qatar"}, {"name": "University of Warwick", "source_name": "University of Warwick", "street_adddress": "University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK", "lat": "52.37931310", "lng": "-1.56042520", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/1182/4658170994e4d4655e8f688bace16a0d3e48.pdf"], "doi": []}, {"id": "061356704ec86334dbbc073985375fe13cd39088", "title": "Very Deep Convolutional Networks for Large-Scale Image Recognition", "addresses": [{"name": "University of Oxford", "source_name": "University of Oxford", "street_adddress": "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu", "country": "United Kingdom"}], "year": "2015", "pdf": ["https://arxiv.org/pdf/1409.1556.pdf"], "doi": []}, {"id": "14318685b5959b51d0f1e3db34643eb2855dc6d9", "title": "Going deeper with convolutions", "addresses": [{"name": "Google", "source_name": "Google, Inc.", "street_adddress": "1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA", "lat": "37.42199990", "lng": "-122.08405750", "type": "company", "country": "United States"}, {"name": "University of Michigan", "source_name": "University of Michigan", "street_adddress": "University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu", "country": "United States"}, {"name": "University of North Carolina", "source_name": "University of North Carolina", "street_adddress": "University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA", "lat": "35.90503535", "lng": "-79.04775327", "type": "edu", "country": "United States"}], "year": "2015", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298594"]}, {"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "title": "End-to-End People Detection in Crowded Scenes", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780624"]}, {"id": "7e915bb8e4ada4f8d261bc855a4f587ea97764ca", "title": "People detection in crowded scenes via regional-based convolutional network", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7877809", 
"http://doi.org/10.1109/ICSP.2016.7877809"]}, {"id": "591a4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b", "title": "A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/591a/4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b.pdf"], "doi": []}, {"id": "b02d31c640b0a31fb18c4f170d841d8e21ffb66c", "title": "Localized region context and object feature fusion for people head detection", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532426", "http://doi.org/10.1109/ICIP.2016.7532426"]}, {"id": "68ea88440fc48d59c7407e71a193ff1973f9ba7c", "title": "Shoulder Keypoint-Detection from Object Detection", "addresses": [{"name": "University of Ottawa", "source_name": "University of Ottawa", "street_adddress": "University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu", "country": "Canada"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/68ea/88440fc48d59c7407e71a193ff1973f9ba7c.pdf"], "doi": []}]}
\ No newline at end of file diff --git a/site/datasets/final/brainwash.json b/site/datasets/final/brainwash.json index 21300e93..b1d897fa 100644 --- a/site/datasets/final/brainwash.json +++ b/site/datasets/final/brainwash.json @@ -1 +1 @@ -{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "dataset": {"key": "brainwash", "name_short": "Brainwash", "name_display": "Brainwash Dataset", "name_full": "Brainwash Dataset", "purpose": "Head detection", "comment": "", "created_by": "Stanford University (US), Max Planck Institute for Informatics (DE)", "funded_by": "Max Planck Center for Visual Computing and Communication", "funded_by_short": "Max Planck Center for Visual Computing and Communication", "used_by": "", "license": "", "url": "https://purl.stanford.edu/sx925dc9385", "dl_im": "Y", "dl_meta": "", "dl_paper": "", "dl_web": "", "mp_pub": "Y", "ft_share": "Y", "nyt_share": "Y", "cooperative": "N", "indoor": "Y", "outdoor": "", "campus": "", "cyberspace": "", "parent": "", "source": "cctv_indoor", "usernames": "", "names": "", "flickr_meta": "", "year_start": "", "year_end": "", "year_published": "2015", "ongoing": "", "images": "11,917 ", "videos": "", "tracklets": "", "identities": "", "img_per_person": "", "num_cameras": "", "faces_or_persons": "91,146", "female": "", "male": "", "landmarks": "", "width": "640", "height": "480", "color": "Y", "gray": "", "tags": "fd", "size_gb": "4.1", "agreement": "", "agreement_signed": "", "flickr": "", "facebook": "", "youtube": "", "vimeo": "", "google": "", "bing": "", "adam": "", "berit": "", "charlie": "Y", "notes": "", "derivative_of": "", "": ""}, "paper": {"paper_id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "key": "brainwash", "title": "End-to-End People Detection in Crowded Scenes", "year": "2016", "pdf": [], "address": {"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, "name": "Brainwash", "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780624"]}, "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}], "additional_papers": [], "citations": [{"id": "e35515f699b60472ac8f50d1da84fab3c55417d6", "title": "Key Parts Context and Scene Geometry in Human Head Detection", "addresses": [{"name": "Tsinghua University", "source_name": "Tsinghua University", "street_adddress": "\u6e05\u534e\u5927\u5b66, 30, \u53cc\u6e05\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "lat": "40.00229045", "lng": "116.32098908", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451832"]}, {"id": "5d5d267416aeb2bbddaf06f703e8683753abdcd0", "title": "Exploiting Multispectral and Contextual Information to Improve Human Detection", "addresses": [{"name": "State University of New Jersey", "source_name": "The State University of New Jersey", "street_adddress": "Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu", "country": "United States"}], "year": "2017", 
"pdf": ["https://pdfs.semanticscholar.org/5d5d/267416aeb2bbddaf06f703e8683753abdcd0.pdf"], "doi": []}, {"id": "0cf0ad8235929417d904acd1c672713ca4fdb105", "title": "Fusion of Head and Full-Body Detectors for Multi-object Tracking", "addresses": [{"name": "Technical University Munich", "source_name": "Technical University Munich", "street_adddress": "TUM, 21, Arcisstra\u00dfe, Bezirksteil K\u00f6nigsplatz, Stadtbezirk 03 Maxvorstadt, M\u00fcnchen, Obb, Bayern, 80333, Deutschland", "lat": "48.14955455", "lng": "11.56775314", "type": "edu", "country": "Germany"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1705.08314.pdf"], "doi": []}, {"id": "cab97d4dc67919f965cc884d80e4d6b743a256eb", "title": "Scale Mapping and Dynamic Re-Detecting in Dense Head Detection", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451653"]}, {"id": "8a6665bfe3ad126d84414280ee2f884818560063", "title": "Vehicle Detection in Urban Traffic Surveillance Images Based on Convolutional Neural Networks with Feature Concatenation", "addresses": [{"name": "China University of Mining and Technology", "source_name": "China University of Mining and Technology", "street_adddress": "China University of Mining and Technology, 1\u53f7, \u5927\u5b66\u8def, \u6cc9\u5c71\u533a (Quanshan), \u5f90\u5dde\u5e02 / Xuzhou, \u6c5f\u82cf\u7701, 221116, \u4e2d\u56fd", "lat": "34.21525380", "lng": "117.13985410", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://pdfs.semanticscholar.org/8a66/65bfe3ad126d84414280ee2f884818560063.pdf"], "doi": []}, {"id": "333880ee776d57555f54935978ccb2ce13cfdb07", "title": "Person classification leveraging Convolutional Neural Network for obstacle avoidance via Unmanned Aerial Vehicles", "addresses": [{"name": "Cranfield University", "source_name": "Cranfield University, UK", "street_adddress": "College Rd, Wharley End, Bedford MK43 0AL, UK", "lat": "52.07418180", "lng": "-0.62781230", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8101662"]}, {"id": "f30bdc22fe454a28234cf858d5bf9de94701ee7a", "title": "Adaptive NMS: Refining Pedestrian Detection in a Crowd", "addresses": [{"name": "Beihang University", "source_name": "Beihang University", "street_adddress": "\u5317\u4eac\u822a\u7a7a\u822a\u5929\u5927\u5b66, 37, \u5b66\u9662\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100083, \u4e2d\u56fd", "lat": "39.98083330", "lng": "116.34101249", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.03629.pdf"], "doi": []}, {"id": "976647b32fd7e1a5c8ee4a11792155903bb34e43", "title": "Multi-Organ Plant Classification Based on Convolutional and Recurrent Neural Networks", "addresses": [{"name": "University of Malaya, Kuala Lumpur", "source_name": "University of Malaya", "street_adddress": "UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia", "lat": "3.12267405", "lng": "101.65356103", "type": "edu", "country": "Malaysia"}, {"name": "Kingston University", "source_name": 
"Kingston University", "street_adddress": "Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK", "lat": "51.42930860", "lng": "-0.26840440", "type": "edu", "country": "United Kingdom"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359391"]}, {"id": "66a08ff2ea7447093624632e7069e3da16961d30", "title": "An Incremental Framework for Video-Based Traffic Sign Detection, Tracking, and Recognition", "addresses": [{"name": "Northwestern Polytechnical University", "source_name": "Northwestern Polytechnical University", "street_adddress": "\u897f\u5317\u5de5\u4e1a\u5927\u5b66 \u53cb\u8c0a\u6821\u533a, 127\u53f7, \u53cb\u8c0a\u897f\u8def, \u957f\u5b89\u8def, \u7891\u6797\u533a (Beilin), \u897f\u5b89\u5e02, \u9655\u897f\u7701, 710072, \u4e2d\u56fd", "lat": "34.24691520", "lng": "108.91061982", "type": "edu", "country": "China"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7605450"]}, {"id": "6fc8c988dd841c6c4f5e96b1b1458b6aa564b2de", "title": "Crowd Counting via Scale-Adaptive Convolutional Neural Network", "addresses": [{"name": "Shanghai Jiaotong University", "source_name": "Shanghai Jiaotong University", "street_adddress": "China, Shanghai, Minhang, \u4e1c\u5ddd\u8def \u90ae\u653f\u7f16\u7801: 200240", "lat": "31.02522010", "lng": "121.43377840", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.04433.pdf"], "doi": []}, {"id": "e607b91f69ea2bff3194b07c5d22b4625bbe306e", "title": "Learning to See People Like People : Predicting Social Impressions of Faces", "addresses": [{"name": "Purdue University", "source_name": "Purdue University", "street_adddress": "Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/e607/b91f69ea2bff3194b07c5d22b4625bbe306e.pdf"], "doi": []}, {"id": "737f3cf354f40a6a7fd8a2058fe2803b8dd6c56b", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/737f/3cf354f40a6a7fd8a2058fe2803b8dd6c56b.pdf"], "doi": []}, {"id": "3e667c54e848233db092b794f2cfbf47ea63b771", "title": "Combined convolutional and recurrent neural networks for hierarchical classification of images", "addresses": [{"name": "Northwestern University", "source_name": "Northwestern University", "street_adddress": "Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA", "lat": "42.05511640", "lng": "-87.67581113", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.09574.pdf"], "doi": []}, {"id": "a5c8fc1ca4f06a344b53dc81ebc6d87f54896722", "title": "Learning to see people like people", "addresses": [{"name": "University of California, San Diego", "source_name": "University of California, San Diego", "street_adddress": "UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA", 
"lat": "32.87935255", "lng": "-117.23110049", "type": "edu", "country": "United States"}, {"name": "Purdue University", "source_name": "Purdue University", "street_adddress": "Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1705.04282.pdf"], "doi": []}, {"id": "7a9c2524a157a6b5d0204c2f3b187cd67eb26b4c", "title": "SINet: A Scale-Insensitive Convolutional Neural Network for Fast Vehicle Detection", "addresses": [{"name": "Hong Kong Polytechnic University", "source_name": "Hong Kong Polytechnic University", "street_adddress": "hong kong, 11, \u80b2\u624d\u9053 Yuk Choi Road, \u5c16\u6c99\u5480 Tsim Sha Tsui, \u6cb9\u5c16\u65fa\u5340 Yau Tsim Mong District, \u4e5d\u9f8d Kowloon, HK, 00000, \u4e2d\u56fd", "lat": "22.30457200", "lng": "114.17976285", "type": "edu", "country": "China"}, {"name": "Chinese University of Hong Kong", "source_name": "Chinese University of Hong Kong", "street_adddress": "Hong Kong, \u99ac\u6599\u6c34\u6c60\u65c1\u8def", "lat": "22.41626320", "lng": "114.21093180", "type": "edu", "country": "China"}, {"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2019", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8478157"]}, {"id": "c7f4e18ec3a805b8aee1d3d7552364c8f5f6ca1d", "title": "An Auto-adaptive CNN for Crowd Counting in Monitor Image", "addresses": [{"name": "Beijing University of Posts and Telecommunications", "source_name": "Beijing University of Posts and Telecommunications", "street_adddress": "\u5317\u4eac\u90ae\u7535\u5927\u5b66, \u897f\u571f\u57ce\u8def, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100082, \u4e2d\u56fd", "lat": "39.96014880", "lng": "116.35193921", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8525541"]}, {"id": "b1db174463b0bbc54a61fcc83acfb89ad3e3d18f", "title": "Loss Functions for Multiset Prediction", "addresses": [{"name": "New York University", "source_name": "New York University", "street_adddress": "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.05246.pdf"], "doi": []}, {"id": "81ba5202424906f64b77f68afca063658139fbb2", "title": "Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, {"name": "IDIAP Research Institute", "source_name": "IDIAP Research Institute", "street_adddress": "Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra", "lat": 
"46.10923700", "lng": "7.08453549", "type": "edu", "country": "Switzerland"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1611.09078.pdf"], "doi": []}, {"id": "830466596c0908399760f2009a09ce605e3121c9", "title": "Revisiting Perspective Information for Efficient Crowd Counting", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Tongji University", "source_name": "Tongji University", "street_adddress": "\u540c\u6d4e\u5927\u5b66, 1239, \u56db\u5e73\u8def, \u6c5f\u6e7e, \u8679\u53e3\u533a, \u4e0a\u6d77\u5e02, 200092, \u4e2d\u56fd", "lat": "31.28473925", "lng": "121.49694909", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.01989.pdf"], "doi": []}, {"id": "1d1a2387663cb3510d6f5c0651c1dc55e48dcabc", "title": "Joint Pedestrian and Body Part Detection via Semantic Relationship Learning", "addresses": [{"name": "Hebei University of Technology", "source_name": "Hebei University of Technology, Tianjin, P. R. China", "street_adddress": "8 Dingzigu 1st Rd, Hongqiao Qu, China, 300131", "lat": "39.17963500", "lng": "117.16588200", "type": "edu", "country": "China"}, {"name": "Chinese Academy of Sciences", "source_name": "Chinese Academy of Sciences", "street_adddress": "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "lat": "40.00447950", "lng": "116.37023800", "type": "edu", "country": "China"}, {"name": "Beijing Information Science and Technology University", "source_name": "Beijing Information Science and Technology University, Beijing, China", "street_adddress": "China, Beijing, Haidian, \u6e05\u6cb3\u56db\u62d4\u5b50", "lat": "40.04332040", "lng": "116.34181090", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://pdfs.semanticscholar.org/1d1a/2387663cb3510d6f5c0651c1dc55e48dcabc.pdf"], "doi": []}, {"id": "91d2f0b7c23239740fa15dff0b5893b992c0ab34", "title": "Sequence-to-Segment Networks for Segment Detection", "addresses": [{"name": "Stony Brook University", "source_name": "Stony Brook University", "street_adddress": "Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/91d2/f0b7c23239740fa15dff0b5893b992c0ab34.pdf"], "doi": []}, {"id": "8e7c647d8e8ba726b03f7e7c5cc395f86b9de9be", "title": "Parsing Pose of People with Interaction", "addresses": [{"name": "California Institute of Technology", "source_name": "California Institute of Technology", "street_adddress": "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/8e7c/647d8e8ba726b03f7e7c5cc395f86b9de9be.pdf"], "doi": []}, {"id": "2f02c1d4858d9c0f5d16099eb090560d5fa4f23f", "title": "Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of 
Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545068"]}, {"id": "c43c3ec39005aaf1f0ad2f641d92f41021342217", "title": "Pedestrian Detection with Autoregressive Network Phases", "addresses": [{"name": "Michigan State University", "source_name": "Michigan State University", "street_adddress": "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.00440.pdf"], "doi": []}, {"id": "e1a784e360f2bf3911cffa30506d46c412659db5", "title": "MetaAnchor: Learning to Detect Objects with Customized Anchors", "addresses": [{"name": "Fudan University", "source_name": "Fudan University", "street_adddress": "\u590d\u65e6\u5927\u5b66, 220, \u90af\u90f8\u8def, \u4e94\u89d2\u573a\u8857\u9053, \u6768\u6d66\u533a, \u4e0a\u6d77\u5e02, 200433, \u4e2d\u56fd", "lat": "31.30104395", "lng": "121.50045497", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.00980.pdf"], "doi": []}, {"id": "9da3ade53bf91556fe46828b216aab20a4e72294", "title": "SNc Neuron Detection Method Based on Deep Learning for Efficacy Evaluation of Anti-PD Drugs", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Shanghai Jiao Tong University", "source_name": "Shanghai Jiao Tong University", "street_adddress": "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\uff08\u5f90\u6c47\u6821\u533a\uff09, \u6dee\u6d77\u897f\u8def, \u756a\u79ba\u5c0f\u533a, \u5e73\u9634\u6865, \u5f90\u6c47\u533a, \u4e0a\u6d77\u5e02, 200052, \u4e2d\u56fd", "lat": "31.20081505", "lng": "121.42840681", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8431470"]}, {"id": "cfa48bc1015b88809e362b4da19fe4459acb1d89", "title": "Learning to Filter Object Detections", "addresses": [{"name": "Max Planck Institute for Intelligent Systems", "source_name": "Max Planck Institute for Intelligent Systems", "street_adddress": "Heisenbergstra\u00dfe 3, 70569 Stuttgart, Germany", "lat": "48.74689390", "lng": "9.08051410", "type": "edu", "country": "Germany"}, {"name": "Microsoft", "source_name": "Microsoft Corporation, Redmond, WA, USA", "street_adddress": "One Microsoft Way, Redmond, WA 98052, USA", "lat": "47.64233180", "lng": "-122.13693020", "type": "company", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/cfa4/8bc1015b88809e362b4da19fe4459acb1d89.pdf"], "doi": []}, {"id": "eb5780e49ad7e713e9f34cf067d45030fb29753e", "title": "Iterative fully convolutional neural networks for automatic vertebra segmentation and identification", "addresses": [{"name": "University Medical Center Utrecht", "source_name": "Rudolf Magnus Institute of Neuroscience, University Medical Center Utrecht, Utrecht, The Netherlands", 
"street_adddress": "Vondellaan 94, 3521 GH Utrecht, Netherlands", "lat": "52.07869500", "lng": "5.11974690", "type": "edu", "country": "Netherlands"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1804.04383.pdf"], "doi": []}, {"id": "54c9343c247ce5a82bc52f83df84c2bbae737ce4", "title": "Deep Learning on Attributed Graphs: A Journey from Graphs to Their Embeddings and Back. (L'apprentissage profond sur graphes attribu\u00e9s: Un voyage aller-retour aux plongements des graphes)", "addresses": [{"name": "INRIA Sophia Antipolis", "source_name": "INRIA Sophia Antipolis Meditérannée, Valbonne, France", "street_adddress": "2004 Route des Lucioles, 06902 Valbonne, France", "lat": "43.61581310", "lng": "7.06838000", "type": "edu", "country": "France"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1901.08296.pdf"], "doi": []}, {"id": "2d81cf3214281af85eb1d9d270a897d62302e88e", "title": "High density people estimation in video surveillance", "addresses": [{"name": "AvidBeam", "source_name": "AvidBeam", "street_adddress": "5 Wadi Al Nile, Maadi Al Khabiri Ash Sharqeyah, Al Maadi, Cairo Governorate 11728, Egypt", "lat": "29.95606300", "lng": "31.25547100", "type": "company", "country": "Egypt"}, {"name": "Faculty of Media Engineering & Technology German University in Cairo", "source_name": "Faculty of Media Engineering & Technology German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}, {"name": "German University in Cairo", "source_name": "Faculty of Media Engineering and Technology, German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8275348"]}, {"id": "d54d0dc1e7ef82f3ed0ee332e0777b6d73477a8c", "title": "Sidewalk-level People Flow Estimation Using Dashboard Cameras Based on Deep Learning", "addresses": [{"name": "Osaka University", "source_name": "Osaka University", "street_adddress": "\u5927\u962a\u5927\u5b66\u6e05\u660e\u5bee, \u670d\u90e8\u897f\u753a\u56db\u4e01\u76ee, \u8c4a\u4e2d\u5e02, \u5927\u962a\u5e9c, \u8fd1\u757f\u5730\u65b9, \u65e5\u672c", "lat": "34.80809035", "lng": "135.45785218", "type": "edu", "country": "Japan"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8653595"]}, {"id": "9607f88fe6298f754e3f610c3a587d8e10b1b0a9", "title": "Scene Text Detection with Recurrent Instance Segmentation", "addresses": [{"name": "National Laboratory of Pattern Recognition", "source_name": "National Laboratory of Pattern Recognition & Chinese Academy of Sciences & University of Chinese Academy of Sciences, Beijing, China", "street_adddress": "China, Beijing, Haidian, Zhongguancun South 1st Alley, \u4e2d\u5173\u6751\u5357\u4e00\u6761", "lat": "39.98177000", "lng": "116.33008600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545073"]}, {"id": "ed2ba6448db8cf945ca24d4df11916c2c5c3edd1", "title": "Rapid Pedestrian Detection Based on Deep Omega-Shape Features with Partial Occlusion Handing", "addresses": [{"name": "University of Electronic Science and Technology of China", "source_name": "University of Electronic Science and Technology of China", "street_adddress": "2 Jianshe North Rd 2nd Section, Jianshe Road, Chenghua Qu, Chengdu Shi, Sichuan Sheng, China, 
610054", "lat": "30.67272100", "lng": "104.09880600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://doi.org/10.1007/s11063-018-9837-1"]}, {"id": "deea683731f468c7234e1089f48c4546e7003b18", "title": "Multi-Task Vehicle Detection With Region-of-Interest Voting", "addresses": [{"name": "Alibaba Group, Hangzhou, China", "source_name": "Alibaba Group, Hangzhou, China", "street_adddress": "Alibaba Group, \u4e94\u5e38\u8857\u9053, \u4f59\u676d\u533a (Yuhang), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, \u4e2d\u56fd", "lat": "30.28106540", "lng": "120.02139087", "type": "edu", "country": "China"}, {"name": "Alibaba, Hangzhou, China", "source_name": "Alibaba, Hangzhou, China", "street_adddress": "699 Wangshang Rd, Binjiang Qu, Hangzhou Shi, Zhejiang Sheng, China", "lat": "30.18996400", "lng": "120.19210000", "type": "edu", "country": "China"}, {"name": "Zhejiang University", "source_name": "Zhejiang University", "street_adddress": "\u6d59\u6c5f\u5927\u5b66\u4e4b\u6c5f\u6821\u533a, \u4e4b\u6c5f\u8def, \u8f6c\u5858\u8857\u9053, \u897f\u6e56\u533a (Xihu), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, 310008, \u4e2d\u56fd", "lat": "30.19331415", "lng": "120.11930822", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8066331", "http://doi.org/10.1109/TIP.2017.2762591", "https://www.ncbi.nlm.nih.gov/pubmed/29028197", "https://www.wikidata.org/entity/Q50063923"]}, {"id": "1303b3e9f4bffb87b6dd34f7849f7a57e00253d1", "title": "Point in, Box out: Beyond Counting Persons in Crowds", "addresses": [{"name": "Sichuan University, Chengdu", "source_name": "Sichuan Univ., Chengdu", "street_adddress": "\u56db\u5ddd\u5927\u5b66\uff08\u534e\u897f\u6821\u533a\uff09, \u6821\u4e1c\u8def, \u6b66\u4faf\u533a, \u6b66\u4faf\u533a (Wuhou), \u6210\u90fd\u5e02 / Chengdu, \u56db\u5ddd\u7701, 610014, \u4e2d\u56fd", "lat": "30.64276900", "lng": "104.06751175", "type": "edu", "country": "China"}, {"name": "INRIA", "source_name": "INRIA Grenoble Rhone-Alpes, Grenoble, France", "street_adddress": "655 Avenue de l'Europe, 38330 Montbonnot-Saint-Martin, France", "lat": "45.21788600", "lng": "5.80736900", "type": "edu", "country": "France"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.01333.pdf"], "doi": []}, {"id": "4140ccbca5a585162420b82e9e4eeb9afbd1e7c2", "title": "Simulating Crowds in Egress Scenarios", "addresses": [{"name": "Pontifical Catholic University of Rio Grande do Sul", "source_name": "Pontifical Catholic University of Rio Grande do Sul", "street_adddress": "Av. Ipiranga, 6681 - Partenon, Porto Alegre - RS, 90619-900, Brazil", "lat": "-30.05934460", "lng": "-51.17349120", "type": "edu", "country": "Brazil"}, {"name": "University of Pennsylvania", "source_name": "University of Pennsylvania", "street_adddress": "Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA", "lat": "39.94923440", "lng": "-75.19198985", "type": "edu", "country": "United States"}, {"name": "Federal University of Rio Grande do Sul", "source_name": "Institute of Informatics, Federal University of Rio Grande do Sul, Porto Alegre, Brazil", "street_adddress": "Av. 
Paulo Gama, 110 - Farroupilha, Porto Alegre - RS, 90040-060, Brazil", "lat": "-30.03382480", "lng": "-51.21882800", "type": "edu", "country": "Brazil"}], "year": "2017", "pdf": [], "doi": ["http://doi.org/10.1007/978-3-319-65202-3"]}, {"id": "5031a110219231ceaa820725c6e77f87f7b2fde2", "title": "Training with Confusion for Fine-Grained Visual Classification", "addresses": [{"name": "Harvard University", "source_name": "Harvard University", "street_adddress": "Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu", "country": "United States"}, {"name": "MIT", "source_name": "Massachusetts Institute", "street_adddress": "MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu", "country": "United States"}, {"name": "Brigham Young University", "source_name": "Brigham Young University, Provo, USA", "street_adddress": "Provo, UT 84602, USA", "lat": "40.25184350", "lng": "-111.64931560", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/5031/a110219231ceaa820725c6e77f87f7b2fde2.pdf"], "doi": []}, {"id": "86c158ef6caaf247d5d14e07c5edded0147df8b7", "title": "Spatial Memory for Context Reasoning in Object Detection", "addresses": [{"name": "Carnegie Mellon University", "source_name": "Carnegie Mellon University Pittsburgh, PA - 15213, USA", "street_adddress": "Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA", "lat": "40.44416190", "lng": "-79.94272826", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1704.04224.pdf"], "doi": []}, {"id": "bba153ebdf11e6fb8716e35749c671ac96c14176", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1706.03686.pdf"], "doi": []}, {"id": "fe5679c183cb432894d111bf02cc3243c89762ca", "title": "Adaptive Scenario Discovery for Crowd Counting", "addresses": [{"name": "East China Normal University", "source_name": "East China Normal University", "street_adddress": "\u534e\u4e1c\u5e08\u8303\u5927\u5b66, 3663, \u4e2d\u5c71\u5317\u8def, \u66f9\u5bb6\u6e21, \u666e\u9640\u533a, \u666e\u9640\u533a (Putuo), \u4e0a\u6d77\u5e02, 200062, \u4e2d\u56fd", "lat": "31.22849230", "lng": "121.40211389", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.02393.pdf"], "doi": []}, {"id": "11824658170994e4d4655e8f688bace16a0d3e48", "title": "Multi-person Head Segmentation in Low Resolution Crowd Scenes Using Convolutional Encoder-Decoder Framework", "addresses": [{"name": "Qatar University", "source_name": "Qatar University", "street_adddress": "Qatar University, Roindabout 3, Al Tarfa (68), \u0623\u0645 \u0635\u0644\u0627\u0644, 24685, \u200f\u0642\u0637\u0631\u200e", "lat": "25.37461295", "lng": "51.48980354", "type": "edu", "country": "Qatar"}, {"name": "University of Warwick", "source_name": "University of Warwick", "street_adddress": "University of Warwick, University Road, Kirby Corner, Cannon 
Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK", "lat": "52.37931310", "lng": "-1.56042520", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/1182/4658170994e4d4655e8f688bace16a0d3e48.pdf"], "doi": []}, {"id": "061356704ec86334dbbc073985375fe13cd39088", "title": "Very Deep Convolutional Networks for Large-Scale Image Recognition", "addresses": [{"name": "University of Oxford", "source_name": "University of Oxford", "street_adddress": "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu", "country": "United Kingdom"}], "year": "2015", "pdf": ["https://arxiv.org/pdf/1409.1556.pdf"], "doi": []}, {"id": "14318685b5959b51d0f1e3db34643eb2855dc6d9", "title": "Going deeper with convolutions", "addresses": [{"name": "Google", "source_name": "Google, Inc.", "street_adddress": "1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA", "lat": "37.42199990", "lng": "-122.08405750", "type": "company", "country": "United States"}, {"name": "University of Michigan", "source_name": "University of Michigan", "street_adddress": "University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu", "country": "United States"}, {"name": "University of North Carolina", "source_name": "University of North Carolina", "street_adddress": "University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA", "lat": "35.90503535", "lng": "-79.04775327", "type": "edu", "country": "United States"}], "year": "2015", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298594"]}, {"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "title": "End-to-End People Detection in Crowded Scenes", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780624"]}, {"id": "7e915bb8e4ada4f8d261bc855a4f587ea97764ca", "title": "People detection in crowded scenes via regional-based convolutional network", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7877809", "http://doi.org/10.1109/ICSP.2016.7877809"]}, {"id": "591a4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b", "title": "A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": 
"2017", "pdf": ["https://pdfs.semanticscholar.org/591a/4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b.pdf"], "doi": []}, {"id": "b02d31c640b0a31fb18c4f170d841d8e21ffb66c", "title": "Localized region context and object feature fusion for people head detection", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532426", "http://doi.org/10.1109/ICIP.2016.7532426"]}, {"id": "68ea88440fc48d59c7407e71a193ff1973f9ba7c", "title": "Shoulder Keypoint-Detection from Object Detection", "addresses": [{"name": "University of Ottawa", "source_name": "University of Ottawa", "street_adddress": "University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu", "country": "Canada"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/68ea/88440fc48d59c7407e71a193ff1973f9ba7c.pdf"], "doi": []}]}
\ No newline at end of file +{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "dataset": {"key": "brainwash", "name_short": "Brainwash", "name_display": "Brainwash Dataset", "name_full": "Brainwash Dataset", "purpose": "Head detection", "comment": "", "created_by": "Stanford University (US), Max Planck Institute for Informatics (DE)", "funded_by": "Max Planck Center for Visual Computing and Communication", "funded_by_short": "Max Planck Center for Visual Computing and Communication", "used_by": "", "license": "", "url": "https://purl.stanford.edu/sx925dc9385", "dl_im": "Y", "dl_meta": "", "dl_paper": "", "dl_web": "", "mp_pub": "Y", "ft_share": "Y", "nyt_share": "Y", "cooperative": "N", "indoor": "Y", "outdoor": "", "campus": "", "cyberspace": "", "parent": "", "source": "cctv_indoor", "usernames": "", "names": "", "flickr_meta": "", "year_start": "", "year_end": "", "year_published": "2015", "ongoing": "", "images": "11,917 ", "videos": "", "tracklets": "", "identities": "", "img_per_person": "", "num_cameras": "", "faces_or_persons": "91,146", "female": "", "male": "", "landmarks": "", "width": "640", "height": "480", "color": "Y", "gray": "", "tags": "fd", "size_gb": "4.1", "agreement": "", "agreement_signed": "", "flickr": "", "facebook": "", "youtube": "", "vimeo": "", "google": "", "bing": "", "adam": "", "berit": "", "charlie": "Y", "notes": "", "derivative_of": "", "": ""}, "paper": {"paper_id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "key": "brainwash", "title": "End-to-End People Detection in Crowded Scenes", "year": "2016", "pdf": [], "address": {"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, "name": "Brainwash", "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780624"]}, "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}], "additional_papers": [], "citations": [{"id": "e35515f699b60472ac8f50d1da84fab3c55417d6", "title": "Key Parts Context and Scene Geometry in Human Head Detection", "addresses": [{"name": "Tsinghua University", "source_name": "Tsinghua University", "street_adddress": "\u6e05\u534e\u5927\u5b66, 30, \u53cc\u6e05\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "lat": "40.00229045", "lng": "116.32098908", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451832"]}, {"id": "5d5d267416aeb2bbddaf06f703e8683753abdcd0", "title": "Exploiting Multispectral and Contextual Information to Improve Human Detection", "addresses": [{"name": "State University of New Jersey", "source_name": "The State University of New Jersey", "street_adddress": "Rutgers New Brunswick: Livingston Campus, Joyce Kilmer Avenue, Piscataway Township, Middlesex County, New Jersey, 08854, USA", "lat": "40.51865195", "lng": "-74.44099801", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/5d5d/267416aeb2bbddaf06f703e8683753abdcd0.pdf"], "doi": []}, {"id": "0cf0ad8235929417d904acd1c672713ca4fdb105", "title": "Fusion of Head and Full-Body Detectors for 
Multi-object Tracking", "addresses": [{"name": "Technical University Munich", "source_name": "Technical University Munich", "street_adddress": "TUM, 21, Arcisstra\u00dfe, Bezirksteil K\u00f6nigsplatz, Stadtbezirk 03 Maxvorstadt, M\u00fcnchen, Obb, Bayern, 80333, Deutschland", "lat": "48.14955455", "lng": "11.56775314", "type": "edu", "country": "Germany"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1705.08314.pdf"], "doi": []}, {"id": "cab97d4dc67919f965cc884d80e4d6b743a256eb", "title": "Scale Mapping and Dynamic Re-Detecting in Dense Head Detection", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451653"]}, {"id": "8a6665bfe3ad126d84414280ee2f884818560063", "title": "Vehicle Detection in Urban Traffic Surveillance Images Based on Convolutional Neural Networks with Feature Concatenation", "addresses": [{"name": "China University of Mining and Technology", "source_name": "China University of Mining and Technology", "street_adddress": "China University of Mining and Technology, 1\u53f7, \u5927\u5b66\u8def, \u6cc9\u5c71\u533a (Quanshan), \u5f90\u5dde\u5e02 / Xuzhou, \u6c5f\u82cf\u7701, 221116, \u4e2d\u56fd", "lat": "34.21525380", "lng": "117.13985410", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://pdfs.semanticscholar.org/8a66/65bfe3ad126d84414280ee2f884818560063.pdf"], "doi": []}, {"id": "333880ee776d57555f54935978ccb2ce13cfdb07", "title": "Person classification leveraging Convolutional Neural Network for obstacle avoidance via Unmanned Aerial Vehicles", "addresses": [{"name": "Cranfield University", "source_name": "Cranfield University, UK", "street_adddress": "College Rd, Wharley End, Bedford MK43 0AL, UK", "lat": "52.07418180", "lng": "-0.62781230", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8101662"]}, {"id": "f30bdc22fe454a28234cf858d5bf9de94701ee7a", "title": "Adaptive NMS: Refining Pedestrian Detection in a Crowd", "addresses": [{"name": "Beihang University", "source_name": "Beihang University", "street_adddress": "\u5317\u4eac\u822a\u7a7a\u822a\u5929\u5927\u5b66, 37, \u5b66\u9662\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100083, \u4e2d\u56fd", "lat": "39.98083330", "lng": "116.34101249", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.03629.pdf"], "doi": []}, {"id": "976647b32fd7e1a5c8ee4a11792155903bb34e43", "title": "Multi-Organ Plant Classification Based on Convolutional and Recurrent Neural Networks", "addresses": [{"name": "University of Malaya, Kuala Lumpur", "source_name": "University of Malaya", "street_adddress": "UM, Lingkaran Wawasan, Bukit Pantai, Bangsar, KL, 50603, Malaysia", "lat": "3.12267405", "lng": "101.65356103", "type": "edu", "country": "Malaysia"}, {"name": "Kingston University", "source_name": "Kingston University", "street_adddress": "Kingston University, Kingston Hill, Kingston Vale, Kingston-upon-Thames, London, Greater London, England, KT2 7TF, UK", "lat": "51.42930860", "lng": "-0.26840440", 
"type": "edu", "country": "United Kingdom"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8359391"]}, {"id": "66a08ff2ea7447093624632e7069e3da16961d30", "title": "An Incremental Framework for Video-Based Traffic Sign Detection, Tracking, and Recognition", "addresses": [{"name": "Northwestern Polytechnical University", "source_name": "Northwestern Polytechnical University", "street_adddress": "\u897f\u5317\u5de5\u4e1a\u5927\u5b66 \u53cb\u8c0a\u6821\u533a, 127\u53f7, \u53cb\u8c0a\u897f\u8def, \u957f\u5b89\u8def, \u7891\u6797\u533a (Beilin), \u897f\u5b89\u5e02, \u9655\u897f\u7701, 710072, \u4e2d\u56fd", "lat": "34.24691520", "lng": "108.91061982", "type": "edu", "country": "China"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7605450"]}, {"id": "6fc8c988dd841c6c4f5e96b1b1458b6aa564b2de", "title": "Crowd Counting via Scale-Adaptive Convolutional Neural Network", "addresses": [{"name": "Shanghai Jiaotong University", "source_name": "Shanghai Jiaotong University", "street_adddress": "China, Shanghai, Minhang, \u4e1c\u5ddd\u8def \u90ae\u653f\u7f16\u7801: 200240", "lat": "31.02522010", "lng": "121.43377840", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.04433.pdf"], "doi": []}, {"id": "e607b91f69ea2bff3194b07c5d22b4625bbe306e", "title": "Learning to See People Like People : Predicting Social Impressions of Faces", "addresses": [{"name": "Purdue University", "source_name": "Purdue University", "street_adddress": "Purdue University, West Stadium Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/e607/b91f69ea2bff3194b07c5d22b4625bbe306e.pdf"], "doi": []}, {"id": "737f3cf354f40a6a7fd8a2058fe2803b8dd6c56b", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/737f/3cf354f40a6a7fd8a2058fe2803b8dd6c56b.pdf"], "doi": []}, {"id": "3e667c54e848233db092b794f2cfbf47ea63b771", "title": "Combined convolutional and recurrent neural networks for hierarchical classification of images", "addresses": [{"name": "Northwestern University", "source_name": "Northwestern University", "street_adddress": "Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA", "lat": "42.05511640", "lng": "-87.67581113", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.09574.pdf"], "doi": []}, {"id": "a5c8fc1ca4f06a344b53dc81ebc6d87f54896722", "title": "Learning to see people like people", "addresses": [{"name": "University of California, San Diego", "source_name": "University of California, San Diego", "street_adddress": "UCSD, 9500, Gilman Drive, Sixth College, University City, San Diego, San Diego County, California, 92093, USA", "lat": "32.87935255", "lng": "-117.23110049", "type": "edu", "country": "United States"}, {"name": "Purdue University", "source_name": "Purdue University", "street_adddress": "Purdue University, West Stadium 
Avenue, West Lafayette, Tippecanoe County, Indiana, 47907, USA", "lat": "40.43197220", "lng": "-86.92389368", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1705.04282.pdf"], "doi": []}, {"id": "7a9c2524a157a6b5d0204c2f3b187cd67eb26b4c", "title": "SINet: A Scale-Insensitive Convolutional Neural Network for Fast Vehicle Detection", "addresses": [{"name": "Hong Kong Polytechnic University", "source_name": "Hong Kong Polytechnic University", "street_adddress": "hong kong, 11, \u80b2\u624d\u9053 Yuk Choi Road, \u5c16\u6c99\u5480 Tsim Sha Tsui, \u6cb9\u5c16\u65fa\u5340 Yau Tsim Mong District, \u4e5d\u9f8d Kowloon, HK, 00000, \u4e2d\u56fd", "lat": "22.30457200", "lng": "114.17976285", "type": "edu", "country": "China"}, {"name": "Chinese University of Hong Kong", "source_name": "Chinese University of Hong Kong", "street_adddress": "Hong Kong, \u99ac\u6599\u6c34\u6c60\u65c1\u8def", "lat": "22.41626320", "lng": "114.21093180", "type": "edu", "country": "China"}, {"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2019", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8478157"]}, {"id": "c7f4e18ec3a805b8aee1d3d7552364c8f5f6ca1d", "title": "An Auto-adaptive CNN for Crowd Counting in Monitor Image", "addresses": [{"name": "Beijing University of Posts and Telecommunications", "source_name": "Beijing University of Posts and Telecommunications", "street_adddress": "\u5317\u4eac\u90ae\u7535\u5927\u5b66, \u897f\u571f\u57ce\u8def, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100082, \u4e2d\u56fd", "lat": "39.96014880", "lng": "116.35193921", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8525541"]}, {"id": "b1db174463b0bbc54a61fcc83acfb89ad3e3d18f", "title": "Loss Functions for Multiset Prediction", "addresses": [{"name": "New York University", "source_name": "New York University", "street_adddress": "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "lat": "40.72925325", "lng": "-73.99625394", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1711.05246.pdf"], "doi": []}, {"id": "81ba5202424906f64b77f68afca063658139fbb2", "title": "Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, {"name": "IDIAP Research Institute", "source_name": "IDIAP Research Institute", "street_adddress": "Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra", "lat": "46.10923700", "lng": "7.08453549", "type": "edu", "country": "Switzerland"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1611.09078.pdf"], "doi": []}, {"id": "d488dad9fa81817c85a284b09ebf198bf6b640f9", "title": 
"FCHD: A fast and accurate head detector", "addresses": [{"name": "IIT Madras, India", "source_name": "IIT Madras, India", "street_adddress": "Indian Institute Of Technology, Chennai, Tamil Nadu 600036, India", "lat": "12.99149290", "lng": "80.23369070", "type": "edu", "country": "India"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.08766.pdf"], "doi": []}, {"id": "830466596c0908399760f2009a09ce605e3121c9", "title": "Revisiting Perspective Information for Efficient Crowd Counting", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Tongji University", "source_name": "Tongji University", "street_adddress": "\u540c\u6d4e\u5927\u5b66, 1239, \u56db\u5e73\u8def, \u6c5f\u6e7e, \u8679\u53e3\u533a, \u4e0a\u6d77\u5e02, 200092, \u4e2d\u56fd", "lat": "31.28473925", "lng": "121.49694909", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.01989.pdf"], "doi": []}, {"id": "1d1a2387663cb3510d6f5c0651c1dc55e48dcabc", "title": "Joint Pedestrian and Body Part Detection via Semantic Relationship Learning", "addresses": [{"name": "Hebei University of Technology", "source_name": "Hebei University of Technology, Tianjin, P. R. China", "street_adddress": "8 Dingzigu 1st Rd, Hongqiao Qu, China, 300131", "lat": "39.17963500", "lng": "117.16588200", "type": "edu", "country": "China"}, {"name": "Chinese Academy of Sciences", "source_name": "Chinese Academy of Sciences", "street_adddress": "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "lat": "40.00447950", "lng": "116.37023800", "type": "edu", "country": "China"}, {"name": "Beijing Information Science and Technology University", "source_name": "Beijing Information Science and Technology University, Beijing, China", "street_adddress": "China, Beijing, Haidian, \u6e05\u6cb3\u56db\u62d4\u5b50", "lat": "40.04332040", "lng": "116.34181090", "type": "edu", "country": "China"}], "year": "2019", "pdf": ["https://pdfs.semanticscholar.org/1d1a/2387663cb3510d6f5c0651c1dc55e48dcabc.pdf"], "doi": []}, {"id": "91d2f0b7c23239740fa15dff0b5893b992c0ab34", "title": "Sequence-to-Segment Networks for Segment Detection", "addresses": [{"name": "Stony Brook University", "source_name": "Stony Brook University", "street_adddress": "Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA", "lat": "40.91531960", "lng": "-73.12706260", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/91d2/f0b7c23239740fa15dff0b5893b992c0ab34.pdf"], "doi": []}, {"id": "8e7c647d8e8ba726b03f7e7c5cc395f86b9de9be", "title": "Parsing Pose of People with Interaction", "addresses": [{"name": "California Institute of Technology", "source_name": "California Institute of Technology", "street_adddress": "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "lat": "34.13710185", "lng": "-118.12527487", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/8e7c/647d8e8ba726b03f7e7c5cc395f86b9de9be.pdf"], "doi": []}, {"id": 
"2f02c1d4858d9c0f5d16099eb090560d5fa4f23f", "title": "Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545068"]}, {"id": "c43c3ec39005aaf1f0ad2f641d92f41021342217", "title": "Pedestrian Detection with Autoregressive Network Phases", "addresses": [{"name": "Michigan State University", "source_name": "Michigan State University", "street_adddress": "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "lat": "42.71856800", "lng": "-84.47791571", "type": "edu", "country": "United States"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.00440.pdf"], "doi": []}, {"id": "e1a784e360f2bf3911cffa30506d46c412659db5", "title": "MetaAnchor: Learning to Detect Objects with Customized Anchors", "addresses": [{"name": "Fudan University", "source_name": "Fudan University", "street_adddress": "\u590d\u65e6\u5927\u5b66, 220, \u90af\u90f8\u8def, \u4e94\u89d2\u573a\u8857\u9053, \u6768\u6d66\u533a, \u4e0a\u6d77\u5e02, 200433, \u4e2d\u56fd", "lat": "31.30104395", "lng": "121.50045497", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1807.00980.pdf"], "doi": []}, {"id": "9043df1de4f6e181875011c1379d1a7f68a28d6c", "title": "People Detection from Overhead Cameras", "addresses": [{"name": "Delft University of Technology", "source_name": "Delft University of Technology", "street_adddress": "TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland", "lat": "51.99882735", "lng": "4.37396037", "type": "edu", "country": "Netherlands"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/9043/df1de4f6e181875011c1379d1a7f68a28d6c.pdf"], "doi": []}, {"id": "9da3ade53bf91556fe46828b216aab20a4e72294", "title": "SNc Neuron Detection Method Based on Deep Learning for Efficacy Evaluation of Anti-PD Drugs", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}, {"name": "Shanghai Jiao Tong University", "source_name": "Shanghai Jiao Tong University", "street_adddress": "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\uff08\u5f90\u6c47\u6821\u533a\uff09, \u6dee\u6d77\u897f\u8def, \u756a\u79ba\u5c0f\u533a, \u5e73\u9634\u6865, \u5f90\u6c47\u533a, \u4e0a\u6d77\u5e02, 200052, \u4e2d\u56fd", "lat": "31.20081505", "lng": "121.42840681", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8431470"]}, {"id": "cfa48bc1015b88809e362b4da19fe4459acb1d89", "title": "Learning to Filter Object Detections", "addresses": [{"name": "Max Planck Institute for Intelligent Systems", "source_name": "Max Planck Institute for Intelligent Systems", "street_adddress": "Heisenbergstra\u00dfe 3, 70569 Stuttgart, Germany", "lat": "48.74689390", "lng": 
"9.08051410", "type": "edu", "country": "Germany"}, {"name": "Microsoft", "source_name": "Microsoft Corporation, Redmond, WA, USA", "street_adddress": "One Microsoft Way, Redmond, WA 98052, USA", "lat": "47.64233180", "lng": "-122.13693020", "type": "company", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/cfa4/8bc1015b88809e362b4da19fe4459acb1d89.pdf"], "doi": []}, {"id": "eb5780e49ad7e713e9f34cf067d45030fb29753e", "title": "Iterative fully convolutional neural networks for automatic vertebra segmentation and identification", "addresses": [{"name": "University Medical Center Utrecht", "source_name": "Rudolf Magnus Institute of Neuroscience, University Medical Center Utrecht, Utrecht, The Netherlands", "street_adddress": "Vondellaan 94, 3521 GH Utrecht, Netherlands", "lat": "52.07869500", "lng": "5.11974690", "type": "edu", "country": "Netherlands"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1804.04383.pdf"], "doi": []}, {"id": "54c9343c247ce5a82bc52f83df84c2bbae737ce4", "title": "Deep Learning on Attributed Graphs: A Journey from Graphs to Their Embeddings and Back. (L'apprentissage profond sur graphes attribu\u00e9s: Un voyage aller-retour aux plongements des graphes)", "addresses": [{"name": "INRIA Sophia Antipolis", "source_name": "INRIA Sophia Antipolis Meditérannée, Valbonne, France", "street_adddress": "2004 Route des Lucioles, 06902 Valbonne, France", "lat": "43.61581310", "lng": "7.06838000", "type": "edu", "country": "France"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1901.08296.pdf"], "doi": []}, {"id": "2d81cf3214281af85eb1d9d270a897d62302e88e", "title": "High density people estimation in video surveillance", "addresses": [{"name": "AvidBeam", "source_name": "AvidBeam", "street_adddress": "5 Wadi Al Nile, Maadi Al Khabiri Ash Sharqeyah, Al Maadi, Cairo Governorate 11728, Egypt", "lat": "29.95606300", "lng": "31.25547100", "type": "company", "country": "Egypt"}, {"name": "Faculty of Media Engineering & Technology German University in Cairo", "source_name": "Faculty of Media Engineering & Technology German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}, {"name": "German University in Cairo", "source_name": "Faculty of Media Engineering and Technology, German University in Cairo, Egypt", "street_adddress": "Cairo Governorate, Egypt", "lat": "29.98663810", "lng": "31.44142180", "type": "edu", "country": "Egypt"}], "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8275348"]}, {"id": "d54d0dc1e7ef82f3ed0ee332e0777b6d73477a8c", "title": "Sidewalk-level People Flow Estimation Using Dashboard Cameras Based on Deep Learning", "addresses": [{"name": "Osaka University", "source_name": "Osaka University", "street_adddress": "\u5927\u962a\u5927\u5b66\u6e05\u660e\u5bee, \u670d\u90e8\u897f\u753a\u56db\u4e01\u76ee, \u8c4a\u4e2d\u5e02, \u5927\u962a\u5e9c, \u8fd1\u757f\u5730\u65b9, \u65e5\u672c", "lat": "34.80809035", "lng": "135.45785218", "type": "edu", "country": "Japan"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8653595"]}, {"id": "9607f88fe6298f754e3f610c3a587d8e10b1b0a9", "title": "Scene Text Detection with Recurrent Instance Segmentation", "addresses": [{"name": "National Laboratory of Pattern Recognition", "source_name": "National Laboratory of Pattern Recognition & Chinese Academy of Sciences & University of Chinese Academy of Sciences, 
Beijing, China", "street_adddress": "China, Beijing, Haidian, Zhongguancun South 1st Alley, \u4e2d\u5173\u6751\u5357\u4e00\u6761", "lat": "39.98177000", "lng": "116.33008600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545073"]}, {"id": "ed2ba6448db8cf945ca24d4df11916c2c5c3edd1", "title": "Rapid Pedestrian Detection Based on Deep Omega-Shape Features with Partial Occlusion Handing", "addresses": [{"name": "University of Electronic Science and Technology of China", "source_name": "University of Electronic Science and Technology of China", "street_adddress": "2 Jianshe North Rd 2nd Section, Jianshe Road, Chenghua Qu, Chengdu Shi, Sichuan Sheng, China, 610054", "lat": "30.67272100", "lng": "104.09880600", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://doi.org/10.1007/s11063-018-9837-1"]}, {"id": "03a65d274dc6caea94f6ab344e0b4969575327e3", "title": "CrowdHuman: A Benchmark for Detecting Human in a Crowd", "addresses": [{"name": "Megvii Inc. (Face++), China", "source_name": "Megvii Inc. (Face++), China", "street_adddress": "China", "lat": "35.86166000", "lng": "104.19539700", "type": "company", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1805.00123.pdf"], "doi": []}, {"id": "d78b190f98f9630cab261eabc399733af052f05c", "title": "Unsupervised Deep Domain Adaptation for Pedestrian Detection", "addresses": [{"name": "Shanghai Jiao Tong University", "source_name": "Shanghai Jiao Tong University", "street_adddress": "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\uff08\u5f90\u6c47\u6821\u533a\uff09, \u6dee\u6d77\u897f\u8def, \u756a\u79ba\u5c0f\u533a, \u5e73\u9634\u6865, \u5f90\u6c47\u533a, \u4e0a\u6d77\u5e02, 200052, \u4e2d\u56fd", "lat": "31.20081505", "lng": "121.42840681", "type": "edu", "country": "China"}, {"name": "University of Twente", "source_name": "University of Twente", "street_adddress": "University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland", "lat": "52.23801390", "lng": "6.85667610", "type": "edu", "country": "Netherlands"}], "year": "2016", "pdf": ["https://arxiv.org/pdf/1802.03269.pdf"], "doi": []}, {"id": "deea683731f468c7234e1089f48c4546e7003b18", "title": "Multi-Task Vehicle Detection With Region-of-Interest Voting", "addresses": [{"name": "Alibaba Group, Hangzhou, China", "source_name": "Alibaba Group, Hangzhou, China", "street_adddress": "Alibaba Group, \u4e94\u5e38\u8857\u9053, \u4f59\u676d\u533a (Yuhang), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, \u4e2d\u56fd", "lat": "30.28106540", "lng": "120.02139087", "type": "edu", "country": "China"}, {"name": "Alibaba, Hangzhou, China", "source_name": "Alibaba, Hangzhou, China", "street_adddress": "699 Wangshang Rd, Binjiang Qu, Hangzhou Shi, Zhejiang Sheng, China", "lat": "30.18996400", "lng": "120.19210000", "type": "edu", "country": "China"}, {"name": "Zhejiang University", "source_name": "Zhejiang University", "street_adddress": "\u6d59\u6c5f\u5927\u5b66\u4e4b\u6c5f\u6821\u533a, \u4e4b\u6c5f\u8def, \u8f6c\u5858\u8857\u9053, \u897f\u6e56\u533a (Xihu), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, 310008, \u4e2d\u56fd", "lat": "30.19331415", "lng": "120.11930822", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8066331", "http://doi.org/10.1109/TIP.2017.2762591", "https://www.ncbi.nlm.nih.gov/pubmed/29028197", "https://www.wikidata.org/entity/Q50063923"]}, 
{"id": "1303b3e9f4bffb87b6dd34f7849f7a57e00253d1", "title": "Point in, Box out: Beyond Counting Persons in Crowds", "addresses": [{"name": "Sichuan University, Chengdu", "source_name": "Sichuan Univ., Chengdu", "street_adddress": "\u56db\u5ddd\u5927\u5b66\uff08\u534e\u897f\u6821\u533a\uff09, \u6821\u4e1c\u8def, \u6b66\u4faf\u533a, \u6b66\u4faf\u533a (Wuhou), \u6210\u90fd\u5e02 / Chengdu, \u56db\u5ddd\u7701, 610014, \u4e2d\u56fd", "lat": "30.64276900", "lng": "104.06751175", "type": "edu", "country": "China"}, {"name": "INRIA", "source_name": "INRIA Grenoble Rhone-Alpes, Grenoble, France", "street_adddress": "655 Avenue de l'Europe, 38330 Montbonnot-Saint-Martin, France", "lat": "45.21788600", "lng": "5.80736900", "type": "edu", "country": "France"}], "year": "2019", "pdf": ["https://arxiv.org/pdf/1904.01333.pdf"], "doi": []}, {"id": "4140ccbca5a585162420b82e9e4eeb9afbd1e7c2", "title": "Simulating Crowds in Egress Scenarios", "addresses": [{"name": "Pontifical Catholic University of Rio Grande do Sul", "source_name": "Pontifical Catholic University of Rio Grande do Sul", "street_adddress": "Av. Ipiranga, 6681 - Partenon, Porto Alegre - RS, 90619-900, Brazil", "lat": "-30.05934460", "lng": "-51.17349120", "type": "edu", "country": "Brazil"}, {"name": "University of Pennsylvania", "source_name": "University of Pennsylvania", "street_adddress": "Penn Museum, 3260, South Street, University City, Philadelphia, Philadelphia County, Pennsylvania, 19104, USA", "lat": "39.94923440", "lng": "-75.19198985", "type": "edu", "country": "United States"}, {"name": "Federal University of Rio Grande do Sul", "source_name": "Institute of Informatics, Federal University of Rio Grande do Sul, Porto Alegre, Brazil", "street_adddress": "Av. Paulo Gama, 110 - Farroupilha, Porto Alegre - RS, 90040-060, Brazil", "lat": "-30.03382480", "lng": "-51.21882800", "type": "edu", "country": "Brazil"}], "year": "2017", "pdf": [], "doi": ["http://doi.org/10.1007/978-3-319-65202-3"]}, {"id": "5031a110219231ceaa820725c6e77f87f7b2fde2", "title": "Training with Confusion for Fine-Grained Visual Classification", "addresses": [{"name": "Harvard University", "source_name": "Harvard University", "street_adddress": "Harvard University, Soldiers Field Road, Allston, Boston, Suffolk County, Massachusetts, 02163, USA", "lat": "42.36782045", "lng": "-71.12666653", "type": "edu", "country": "United States"}, {"name": "MIT", "source_name": "Massachusetts Institute", "street_adddress": "MIT, Amherst Street, Cambridgeport, Cambridge, Middlesex County, Massachusetts, 02238, USA", "lat": "42.35839610", "lng": "-71.09567788", "type": "edu", "country": "United States"}, {"name": "Brigham Young University", "source_name": "Brigham Young University, Provo, USA", "street_adddress": "Provo, UT 84602, USA", "lat": "40.25184350", "lng": "-111.64931560", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/5031/a110219231ceaa820725c6e77f87f7b2fde2.pdf"], "doi": []}, {"id": "86c158ef6caaf247d5d14e07c5edded0147df8b7", "title": "Spatial Memory for Context Reasoning in Object Detection", "addresses": [{"name": "Carnegie Mellon University", "source_name": "Carnegie Mellon University Pittsburgh, PA - 15213, USA", "street_adddress": "Carnegie Mellon University, Forbes Avenue, Squirrel Hill North, PGH, Allegheny County, Pennsylvania, 15213, USA", "lat": "40.44416190", "lng": "-79.94272826", "type": "edu", "country": "United States"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1704.04224.pdf"], "doi": []}, 
{"id": "bba153ebdf11e6fb8716e35749c671ac96c14176", "title": "Image Crowd Counting Using Convolutional Neural Network and Markov Random Field", "addresses": [{"name": "Shanghai University", "source_name": "Shanghai University", "street_adddress": "\u4e0a\u6d77\u5927\u5b66, \u9526\u79cb\u8def, \u5927\u573a\u9547, \u5b9d\u5c71\u533a (Baoshan), \u4e0a\u6d77\u5e02, 201906, \u4e2d\u56fd", "lat": "31.32235655", "lng": "121.38400941", "type": "edu", "country": "China"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1706.03686.pdf"], "doi": []}, {"id": "fe5679c183cb432894d111bf02cc3243c89762ca", "title": "Adaptive Scenario Discovery for Crowd Counting", "addresses": [{"name": "East China Normal University", "source_name": "East China Normal University", "street_adddress": "\u534e\u4e1c\u5e08\u8303\u5927\u5b66, 3663, \u4e2d\u5c71\u5317\u8def, \u66f9\u5bb6\u6e21, \u666e\u9640\u533a, \u666e\u9640\u533a (Putuo), \u4e0a\u6d77\u5e02, 200062, \u4e2d\u56fd", "lat": "31.22849230", "lng": "121.40211389", "type": "edu", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1812.02393.pdf"], "doi": []}, {"id": "11824658170994e4d4655e8f688bace16a0d3e48", "title": "Multi-person Head Segmentation in Low Resolution Crowd Scenes Using Convolutional Encoder-Decoder Framework", "addresses": [{"name": "Qatar University", "source_name": "Qatar University", "street_adddress": "Qatar University, Roindabout 3, Al Tarfa (68), \u0623\u0645 \u0635\u0644\u0627\u0644, 24685, \u200f\u0642\u0637\u0631\u200e", "lat": "25.37461295", "lng": "51.48980354", "type": "edu", "country": "Qatar"}, {"name": "University of Warwick", "source_name": "University of Warwick", "street_adddress": "University of Warwick, University Road, Kirby Corner, Cannon Park, Coventry, West Midlands Combined Authority, West Midlands, England, CV4 7AL, UK", "lat": "52.37931310", "lng": "-1.56042520", "type": "edu", "country": "United Kingdom"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/1182/4658170994e4d4655e8f688bace16a0d3e48.pdf"], "doi": []}, {"id": "061356704ec86334dbbc073985375fe13cd39088", "title": "Very Deep Convolutional Networks for Large-Scale Image Recognition", "addresses": [{"name": "University of Oxford", "source_name": "University of Oxford", "street_adddress": "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "lat": "51.75345380", "lng": "-1.25400997", "type": "edu", "country": "United Kingdom"}], "year": "2015", "pdf": ["https://arxiv.org/pdf/1409.1556.pdf"], "doi": []}, {"id": "14318685b5959b51d0f1e3db34643eb2855dc6d9", "title": "Going deeper with convolutions", "addresses": [{"name": "Google", "source_name": "Google, Inc.", "street_adddress": "1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA", "lat": "37.42199990", "lng": "-122.08405750", "type": "company", "country": "United States"}, {"name": "University of Michigan", "source_name": "University of Michigan", "street_adddress": "University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA", "lat": "42.29421420", "lng": "-83.71003894", "type": "edu", "country": "United States"}, {"name": "University of North Carolina", "source_name": "University of North Carolina", "street_adddress": "University of North Carolina, Emergency Room Drive, Chapel Hill, Orange County, North Carolina, 27599, USA", "lat": "35.90503535", "lng": "-79.04775327", "type": "edu", "country": "United States"}], "year": "2015", "pdf": [], "doi": 
["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298594"]}, {"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "title": "End-to-End People Detection in Crowded Scenes", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780624"]}, {"id": "7e915bb8e4ada4f8d261bc855a4f587ea97764ca", "title": "People detection in crowded scenes via regional-based convolutional network", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7877809", "http://doi.org/10.1109/ICSP.2016.7877809"]}, {"id": "591a4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b", "title": "A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/591a/4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b.pdf"], "doi": []}, {"id": "b02d31c640b0a31fb18c4f170d841d8e21ffb66c", "title": "Localized region context and object feature fusion for people head detection", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532426", "http://doi.org/10.1109/ICIP.2016.7532426"]}, {"id": "68ea88440fc48d59c7407e71a193ff1973f9ba7c", "title": "Shoulder Keypoint-Detection from Object Detection", "addresses": [{"name": "University of Ottawa", "source_name": "University of Ottawa", "street_adddress": "University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu", "country": "Canada"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/68ea/88440fc48d59c7407e71a193ff1973f9ba7c.pdf"], "doi": []}]}
\ No newline at end of file diff --git a/site/datasets/unknown/brainwash.json b/site/datasets/unknown/brainwash.json index a584106d..2e2b4252 100644 --- a/site/datasets/unknown/brainwash.json +++ b/site/datasets/unknown/brainwash.json @@ -1 +1 @@ -{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "citations": [{"id": "02caadae027f983261d93e40f4d9d1f785163db4", "title": "Multi-Task Deep Networks for Depth-Based 6D Object Pose and Joint Registration in Crowd Scenarios", "year": "2018", "pdf": ["https://arxiv.org/pdf/1806.03891.pdf"], "doi": []}, {"id": "6f172b6635ad9e3d3e0ab65d931dcb354eb9ff73", "title": "Accurate Single Stage Detector Using Recurrent Rolling Convolution", "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099570"]}, {"id": "34786071f672b55fcdb24213a95f2ee52623ff23", "title": "MultiNet: Real-time Joint Semantic Reasoning for Autonomous Driving", "year": "2018", "pdf": ["https://arxiv.org/pdf/1612.07695.pdf"], "doi": []}, {"id": "439f6206480b3ce069d75a95b1ffed9417117a17", "title": "Representations, Analysis and Recognition of Shape and Motion from Imaging Data", "year": "2017", "pdf": [], "doi": ["https://doi.org/10.1007/978-3-030-19816-9"]}, {"id": "95addf732b584f7a2959f143d860863df3d1f320", "title": "Deep Learning on Attributed Graphs", "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/95ad/df732b584f7a2959f143d860863df3d1f320.pdf"], "doi": []}, {"id": "5d49632f8c8cd06cd5ce66f007aa140f40c12c45", "title": "Bus-Crowdedness Estimation by Shallow Convolutional Neural Network", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8615907"]}, {"id": "b80a7bbde2986a0b3474258ec2fad0a75813d89f", "title": "Context Learning Network for Object Detection", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8637445"]}, {"id": "84afbf356669f544f6c7e19fdb273edc93bf93ee", "title": "Scatteract: Automated Extraction of Data from Scatter Plots", "year": "2017", "pdf": ["https://arxiv.org/pdf/1704.06687.pdf"], "doi": []}, {"id": "bcdb697c9d748f7655859256b0228a51b19b6fee", "title": "Parallel RCNN: A deep learning method for people detection using RGB-D images", "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8302069"]}, {"id": "e586547a63400881c7a95d6ad6d5fa31ac237ca9", "title": "Variational Methods for Human Modeling", "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/e586/547a63400881c7a95d6ad6d5fa31ac237ca9.pdf"], "doi": []}, {"id": "99c1bc2b09210d016a252ddae051ca7cf7fe0a56", "title": "Abnormality Extraction in Crowd", "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/99c1/bc2b09210d016a252ddae051ca7cf7fe0a56.pdf"], "doi": []}, {"id": "d488dad9fa81817c85a284b09ebf198bf6b640f9", "title": "FCHD: A fast and accurate head detector", "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.08766.pdf"], "doi": []}, {"id": "b8c51b9ad3da310b590629b050152460abf7effb", "title": "Recent Advances in Object Detection in the Age of Deep Convolutional Neural Networks", "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.03193.pdf"], "doi": []}, {"id": "f38a1edab9f0f0f36718778ac8d510fd25c41269", "title": "Adversarial Adaptation From Synthesis to Reality in Fast Detector for Smoke Detection", "year": "2019", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8657935"]}, {"id": "6a0aaefce8a27a8727d896fa444ba27558b2d381", "title": "Relation Networks for Object Detection", "year": "2018", "pdf": [], 
"doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8578476"]}, {"id": "82b1ca78b2fc7ae0459f5d7c61a78822b8a590d2", "title": "Deep Semantic Instance Segmentation of Tree-Like Structures Using Synthetic Data", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8658794"]}, {"id": "9043df1de4f6e181875011c1379d1a7f68a28d6c", "title": "People Detection from Overhead Cameras", "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/9043/df1de4f6e181875011c1379d1a7f68a28d6c.pdf"], "doi": []}, {"id": "a4ffdc6f0811b5adbf41d20433a44fc546197b75", "title": "GraphVAE: Towards Generation of Small Graphs Using Variational Autoencoders", "year": "2018", "pdf": ["https://arxiv.org/pdf/1802.03480.pdf"], "doi": []}, {"id": "750bc0d2c9105a352001875127d796599a994886", "title": "Position Detection and Direction Prediction for Arbitrary-Oriented Ships via Multitask Rotation Region Convolutional Neural Network", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8464244"]}, {"id": "28cd46a078e8fad370b1aba34762a874374513a5", "title": "cvpaper.challenge in 2016: Futuristic Computer Vision through 1, 600 Papers Survey", "year": "2017", "pdf": ["https://arxiv.org/pdf/1707.06436.pdf"], "doi": []}, {"id": "03a65d274dc6caea94f6ab344e0b4969575327e3", "title": "CrowdHuman: A Benchmark for Detecting Human in a Crowd", "year": "2018", "pdf": ["https://arxiv.org/pdf/1805.00123.pdf"], "doi": []}, {"id": "8935ffe454758e2e5def0b5190de6e28c350b3b8", "title": "Learning to Reconstruct Face Geometries Research", "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/ea06/abd83c491877f0ce04cc7780ce068578f282.pdf"], "doi": []}, {"id": "993acefc2e350f9661125bb74df136e2b614ea23", "title": "People detection on the Pepper Robot using Convolutional Neural Networks and 3D Blob detection", "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/993a/cefc2e350f9661125bb74df136e2b614ea23.pdf"], "doi": []}, {"id": "d78b190f98f9630cab261eabc399733af052f05c", "title": "Unsupervised Deep Domain Adaptation for Pedestrian Detection", "year": "2016", "pdf": ["https://arxiv.org/pdf/1802.03269.pdf"], "doi": []}, {"id": "08e18921d7e405ad27956c75f2613230170997d5", "title": "Towards Variational Generation of Small Graphs", "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/a001/f2440fa49d1c137c9ca1b892857270096ef9.pdf"], "doi": []}, {"id": "f6f4d887fb62d33a9a18cbb7bc58bd6247384a35", "title": "People detection in crowded scenes using hierarchical features", "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8261462"]}, {"id": "0ceda9dae8b9f322df65ca2ef02caca9758aec6f", "title": "Context-Aware CNNs for Person Head Detection", "year": "2015", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410688"]}, {"id": "9f25b9efd09cc08c1ae14f301c90b903614968d8", "title": "Deep People Detection: A Comparative Study of SSD and LSTM-decoder", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8575768"]}]}
\ No newline at end of file +{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "citations": [{"id": "02caadae027f983261d93e40f4d9d1f785163db4", "title": "Multi-Task Deep Networks for Depth-Based 6D Object Pose and Joint Registration in Crowd Scenarios", "year": "2018", "pdf": ["https://arxiv.org/pdf/1806.03891.pdf"], "doi": []}, {"id": "6f172b6635ad9e3d3e0ab65d931dcb354eb9ff73", "title": "Accurate Single Stage Detector Using Recurrent Rolling Convolution", "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8099570"]}, {"id": "34786071f672b55fcdb24213a95f2ee52623ff23", "title": "MultiNet: Real-time Joint Semantic Reasoning for Autonomous Driving", "year": "2018", "pdf": ["https://arxiv.org/pdf/1612.07695.pdf"], "doi": []}, {"id": "439f6206480b3ce069d75a95b1ffed9417117a17", "title": "Representations, Analysis and Recognition of Shape and Motion from Imaging Data", "year": "2017", "pdf": [], "doi": ["https://doi.org/10.1007/978-3-030-19816-9"]}, {"id": "95addf732b584f7a2959f143d860863df3d1f320", "title": "Deep Learning on Attributed Graphs", "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/95ad/df732b584f7a2959f143d860863df3d1f320.pdf"], "doi": []}, {"id": "5d49632f8c8cd06cd5ce66f007aa140f40c12c45", "title": "Bus-Crowdedness Estimation by Shallow Convolutional Neural Network", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8615907"]}, {"id": "b80a7bbde2986a0b3474258ec2fad0a75813d89f", "title": "Context Learning Network for Object Detection", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8637445"]}, {"id": "84afbf356669f544f6c7e19fdb273edc93bf93ee", "title": "Scatteract: Automated Extraction of Data from Scatter Plots", "year": "2017", "pdf": ["https://arxiv.org/pdf/1704.06687.pdf"], "doi": []}, {"id": "bcdb697c9d748f7655859256b0228a51b19b6fee", "title": "Parallel RCNN: A deep learning method for people detection using RGB-D images", "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8302069"]}, {"id": "e586547a63400881c7a95d6ad6d5fa31ac237ca9", "title": "Variational Methods for Human Modeling", "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/e586/547a63400881c7a95d6ad6d5fa31ac237ca9.pdf"], "doi": []}, {"id": "99c1bc2b09210d016a252ddae051ca7cf7fe0a56", "title": "Abnormality Extraction in Crowd", "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/99c1/bc2b09210d016a252ddae051ca7cf7fe0a56.pdf"], "doi": []}, {"id": "b8c51b9ad3da310b590629b050152460abf7effb", "title": "Recent Advances in Object Detection in the Age of Deep Convolutional Neural Networks", "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.03193.pdf"], "doi": []}, {"id": "f38a1edab9f0f0f36718778ac8d510fd25c41269", "title": "Adversarial Adaptation From Synthesis to Reality in Fast Detector for Smoke Detection", "year": "2019", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8657935"]}, {"id": "6a0aaefce8a27a8727d896fa444ba27558b2d381", "title": "Relation Networks for Object Detection", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8578476"]}, {"id": "82b1ca78b2fc7ae0459f5d7c61a78822b8a590d2", "title": "Deep Semantic Instance Segmentation of Tree-Like Structures Using Synthetic Data", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8658794"]}, {"id": "a4ffdc6f0811b5adbf41d20433a44fc546197b75", "title": "GraphVAE: Towards 
Generation of Small Graphs Using Variational Autoencoders", "year": "2018", "pdf": ["https://arxiv.org/pdf/1802.03480.pdf"], "doi": []}, {"id": "750bc0d2c9105a352001875127d796599a994886", "title": "Position Detection and Direction Prediction for Arbitrary-Oriented Ships via Multitask Rotation Region Convolutional Neural Network", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8464244"]}, {"id": "28cd46a078e8fad370b1aba34762a874374513a5", "title": "cvpaper.challenge in 2016: Futuristic Computer Vision through 1, 600 Papers Survey", "year": "2017", "pdf": ["https://arxiv.org/pdf/1707.06436.pdf"], "doi": []}, {"id": "8935ffe454758e2e5def0b5190de6e28c350b3b8", "title": "Learning to Reconstruct Face Geometries Research", "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/ea06/abd83c491877f0ce04cc7780ce068578f282.pdf"], "doi": []}, {"id": "993acefc2e350f9661125bb74df136e2b614ea23", "title": "People detection on the Pepper Robot using Convolutional Neural Networks and 3D Blob detection", "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/993a/cefc2e350f9661125bb74df136e2b614ea23.pdf"], "doi": []}, {"id": "08e18921d7e405ad27956c75f2613230170997d5", "title": "Towards Variational Generation of Small Graphs", "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/a001/f2440fa49d1c137c9ca1b892857270096ef9.pdf"], "doi": []}, {"id": "f6f4d887fb62d33a9a18cbb7bc58bd6247384a35", "title": "People detection in crowded scenes using hierarchical features", "year": "2017", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8261462"]}, {"id": "0ceda9dae8b9f322df65ca2ef02caca9758aec6f", "title": "Context-Aware CNNs for Person Head Detection", "year": "2015", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7410688"]}, {"id": "9f25b9efd09cc08c1ae14f301c90b903614968d8", "title": "Deep People Detection: A Comparative Study of SSD and LSTM-decoder", "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8575768"]}]}
\ No newline at end of file diff --git a/site/datasets/verified/brainwash.json b/site/datasets/verified/brainwash.json index e7dd9ff5..edfa0ac9 100644 --- a/site/datasets/verified/brainwash.json +++ b/site/datasets/verified/brainwash.json @@ -1 +1 @@ -{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "paper": {"key": "brainwash", "name": "Brainwash", "title": "End-to-End People Detection in Crowded Scenes", "year": "2016", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}], "vetting": {"yes": 12, "no": 42, "total": 54}}, "citations": [{"id": "7e915bb8e4ada4f8d261bc855a4f587ea97764ca", "title": "People detection in crowded scenes via regional-based convolutional network", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7877809", "http://doi.org/10.1109/ICSP.2016.7877809"]}, {"id": "591a4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b", "title": "A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/591a/4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b.pdf"], "doi": []}, {"id": "b02d31c640b0a31fb18c4f170d841d8e21ffb66c", "title": "Localized region context and object feature fusion for people head detection", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532426", "http://doi.org/10.1109/ICIP.2016.7532426"]}, {"id": "81ba5202424906f64b77f68afca063658139fbb2", "title": "Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, {"name": "IDIAP Research Institute", "source_name": "IDIAP Research Institute", "street_adddress": "Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra", 
"lat": "46.10923700", "lng": "7.08453549", "type": "edu", "country": "Switzerland"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1611.09078.pdf"], "doi": []}, {"id": "2f02c1d4858d9c0f5d16099eb090560d5fa4f23f", "title": "Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545068"]}, {"id": "cab97d4dc67919f965cc884d80e4d6b743a256eb", "title": "Scale Mapping and Dynamic Re-Detecting in Dense Head Detection", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451653"]}, {"id": "68ea88440fc48d59c7407e71a193ff1973f9ba7c", "title": "Shoulder Keypoint-Detection from Object Detection", "addresses": [{"name": "University of Ottawa", "source_name": "University of Ottawa", "street_adddress": "University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu", "country": "Canada"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/68ea/88440fc48d59c7407e71a193ff1973f9ba7c.pdf"], "doi": []}]}
\ No newline at end of file +{"id": "1bd1645a629f1b612960ab9bba276afd4cf7c666", "paper": {"key": "brainwash", "name": "Brainwash", "title": "End-to-End People Detection in Crowded Scenes", "year": "2016", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}], "vetting": {"yes": 12, "no": 42, "total": 54}}, "citations": [{"id": "7e915bb8e4ada4f8d261bc855a4f587ea97764ca", "title": "People detection in crowded scenes via regional-based convolutional network", "addresses": [{"name": "Peking University", "source_name": "Peking University", "street_adddress": "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "lat": "39.99223790", "lng": "116.30393816", "type": "edu", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7877809", "http://doi.org/10.1109/ICSP.2016.7877809"]}, {"id": "591a4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b", "title": "A Replacement Algorithm of Non-Maximum Suppression Base on Graph Clustering", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2017", "pdf": ["https://pdfs.semanticscholar.org/591a/4bfa6380c9fcd5f3ae690e3ac5c09b7bf37b.pdf"], "doi": []}, {"id": "b02d31c640b0a31fb18c4f170d841d8e21ffb66c", "title": "Localized region context and object feature fusion for people head detection", "addresses": [{"name": "National University of Defense Technology, China", "source_name": "National University of Defence Technology, Changsha 410000, China", "street_adddress": "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "lat": "28.22902090", "lng": "112.99483204", "type": "mil", "country": "China"}], "year": "2016", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7532426", "http://doi.org/10.1109/ICIP.2016.7532426"]}, {"id": "81ba5202424906f64b77f68afca063658139fbb2", "title": "Social Scene Understanding: End-to-End Multi-person Action Localization and Collective Activity Recognition", "addresses": [{"name": "Stanford University", "source_name": "Stanford University", "street_adddress": "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "lat": "37.43131385", "lng": "-122.16936535", "type": "edu", "country": "United States"}, {"name": "IDIAP Research Institute", "source_name": "IDIAP Research Institute", "street_adddress": "Idiap Research Institute, Parking Centre du parc, Martigny, Valais/Wallis, 1920, Schweiz/Suisse/Svizzera/Svizra", "lat": "46.10923700", "lng": "7.08453549", "type": "edu", "country": "Switzerland"}], "year": "2017", "pdf": ["https://arxiv.org/pdf/1611.09078.pdf"], "doi": []}, {"id": "03a65d274dc6caea94f6ab344e0b4969575327e3", "title": 
"CrowdHuman: A Benchmark for Detecting Human in a Crowd", "addresses": [{"name": "Megvii Inc. (Face++), China", "source_name": "Megvii Inc. (Face++), China", "street_adddress": "China", "lat": "35.86166000", "lng": "104.19539700", "type": "company", "country": "China"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1805.00123.pdf"], "doi": []}, {"id": "2f02c1d4858d9c0f5d16099eb090560d5fa4f23f", "title": "Detecting Heads using Feature Refine Net and Cascaded Multi-scale Architecture", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8545068"]}, {"id": "cab97d4dc67919f965cc884d80e4d6b743a256eb", "title": "Scale Mapping and Dynamic Re-Detecting in Dense Head Detection", "addresses": [{"name": "South China University of Technology", "source_name": "South China University of Technology", "street_adddress": "\u534e\u5357\u7406\u5de5\u5927\u5b66, \u5927\u5b66\u57ce\u4e2d\u73af\u4e1c\u8def, \u5e7f\u5dde\u5927\u5b66\u57ce, \u65b0\u9020, \u756a\u79ba\u533a (Panyu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510006, \u4e2d\u56fd", "lat": "23.05020420", "lng": "113.39880323", "type": "edu", "country": "China"}], "year": "2018", "pdf": [], "doi": ["http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8451653"]}, {"id": "d78b190f98f9630cab261eabc399733af052f05c", "title": "Unsupervised Deep Domain Adaptation for Pedestrian Detection", "addresses": [{"name": "Shanghai Jiao Tong University", "source_name": "Shanghai Jiao Tong University", "street_adddress": "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\uff08\u5f90\u6c47\u6821\u533a\uff09, \u6dee\u6d77\u897f\u8def, \u756a\u79ba\u5c0f\u533a, \u5e73\u9634\u6865, \u5f90\u6c47\u533a, \u4e0a\u6d77\u5e02, 200052, \u4e2d\u56fd", "lat": "31.20081505", "lng": "121.42840681", "type": "edu", "country": "China"}, {"name": "University of Twente", "source_name": "University of Twente", "street_adddress": "University of Twente, De Achterhorst;Hallenweg, Enschede, Regio Twente, Overijssel, Nederland, 7522NH, Nederland", "lat": "52.23801390", "lng": "6.85667610", "type": "edu", "country": "Netherlands"}], "year": "2016", "pdf": ["https://arxiv.org/pdf/1802.03269.pdf"], "doi": []}, {"id": "d488dad9fa81817c85a284b09ebf198bf6b640f9", "title": "FCHD: A fast and accurate head detector", "addresses": [{"name": "IIT Madras, India", "source_name": "IIT Madras, India", "street_adddress": "Indian Institute Of Technology, Chennai, Tamil Nadu 600036, India", "lat": "12.99149290", "lng": "80.23369070", "type": "edu", "country": "India"}], "year": "2018", "pdf": ["https://arxiv.org/pdf/1809.08766.pdf"], "doi": []}, {"id": "68ea88440fc48d59c7407e71a193ff1973f9ba7c", "title": "Shoulder Keypoint-Detection from Object Detection", "addresses": [{"name": "University of Ottawa", "source_name": "University of Ottawa", "street_adddress": "University of Ottawa, 1, Stewart Street, Byward Market, Lowertown, Rideau-Vanier, Ottawa, Ontario, K1N 6N5, Canada", "lat": "45.42580475", "lng": "-75.68740118", "type": "edu", "country": "Canada"}], "year": "2018", "pdf": 
["https://pdfs.semanticscholar.org/68ea/88440fc48d59c7407e71a193ff1973f9ba7c.pdf"], "doi": []}, {"id": "9043df1de4f6e181875011c1379d1a7f68a28d6c", "title": "People Detection from Overhead Cameras", "addresses": [{"name": "Delft University of Technology", "source_name": "Delft University of Technology", "street_adddress": "TU Delft, Mekelweg, TU-wijk, Delft, Zuid-Holland, Nederland, 2628, Nederland", "lat": "51.99882735", "lng": "4.37396037", "type": "edu", "country": "Netherlands"}], "year": "2018", "pdf": ["https://pdfs.semanticscholar.org/9043/df1de4f6e181875011c1379d1a7f68a28d6c.pdf"], "doi": []}]}
\ No newline at end of file
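
For reference, every record in the verified/brainwash.json hunk above follows the same shape: a citing paper id, title, year, optional pdf/doi links, and a list of geocoded addresses (name, source_name, street_adddress, lat, lng, type, country). The snippet below is a minimal sketch, not part of s2-final-report.py; the file path and the Counter-based tally are illustrative assumptions about how such a file might be consumed.

import json
from collections import Counter

def summarize(path="site/datasets/verified/brainwash.json"):
    """Tally the geocoded citations of the Brainwash paper by country and institution type."""
    with open(path, encoding="utf-8") as f:
        data = json.load(f)

    by_country = Counter()
    by_type = Counter()
    for citation in data.get("citations", []):
        # Note: the address key is spelled "street_adddress" in the source data.
        for addr in citation.get("addresses", []):
            by_country[addr.get("country", "Unknown")] += 1
            by_type[addr.get("type", "unknown")] += 1

    print(f'{data["paper"]["name"]} ({data["paper"]["year"]}): '
          f'{len(data.get("citations", []))} citations with addresses')
    print("by country:", dict(by_country.most_common()))
    print("by type:", dict(by_type.most_common()))

if __name__ == "__main__":
    summarize()

The unknown/brainwash.json variant diffed above carries the same citation entries but without address records, so only citation counts, not geography, can be recovered from it.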
