path: root/reports/report_index.html
Diffstat (limited to 'reports/report_index.html')
-rw-r--r--  reports/report_index.html  2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/reports/report_index.html b/reports/report_index.html
index 32fb9549..d876ee3a 100644
--- a/reports/report_index.html
+++ b/reports/report_index.html
@@ -1 +1 @@
-<!doctype html><html><head><title>All Papers</title><link rel='stylesheet' href='reports.css'></head><body><h2>All Papers</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td></td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="{'url': 'http://doi.org/10.1016/j.imavis.2016.01.002', 'linkType': 'doi'}">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>42%</td><td>103</td><td>43</td><td>60</td><td>10</td><td>65</td><td>30</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td></td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>53%</td><td>285</td><td>150</td><td>134</td><td>25</td><td>185</td><td>82</td></tr><tr><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td><td></td><td><a href="papers/c34532fe6bfbd1e6df477c9ffdbb043b77e7804d.html">A 3D Morphable Eye Region Model for Gaze Estimation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>62%</td><td>21</td><td>13</td><td>8</td><td>0</td><td>17</td><td>4</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td></td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>56%</td><td>169</td><td>94</td><td>75</td><td>14</td><td>112</td><td>49</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html">A data-driven approach to cleaning large face datasets</a></td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>42%</td><td>120</td><td>51</td><td>69</td><td>5</td><td>93</td><td>24</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html">Age and Gender Estimation of Unfiltered Faces</a></td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf">[pdf]</a></td><td>IEEE Transactions on Information Forensics and 
Security</td><td></td><td></td><td></td><td></td><td>43%</td><td>155</td><td>67</td><td>88</td><td>5</td><td>80</td><td>55</td></tr><tr><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td><td></td><td><a href="papers/6dcf418c778f528b5792104760f1fbfe90c6dd6a.html">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>70%</td><td>10</td><td>7</td><td>2</td><td>1</td><td>9</td><td>0</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td></td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>51%</td><td>278</td><td>143</td><td>135</td><td>30</td><td>195</td><td>65</td></tr><tr><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td><td></td><td><a href="papers/c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8.html">Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</a></td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td>Scientific reports</td><td>edu</td><td>University College London</td><td>51.52316070</td><td>-0.12820370</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td></td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html">Attribute and simile classifiers for face verification</a></td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>48%</td><td>862</td><td>416</td><td>446</td><td>46</td><td>556</td><td>232</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td></td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>63%</td><td>43</td><td>27</td><td>16</td><td>0</td><td>16</td><td>21</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td></td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>58%</td><td>45</td><td>26</td><td>18</td><td>2</td><td>37</td><td>5</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td></td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a 
href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>41%</td><td>46</td><td>19</td><td>27</td><td>0</td><td>17</td><td>22</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf">[pdf]</a></td><td></td><td>edu</td><td>Kyushu University</td><td>33.59914655</td><td>130.22359848</td><td>32%</td><td>804</td><td>254</td><td>550</td><td>45</td><td>383</td><td>263</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf">[pdf]</a></td><td></td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>33%</td><td>999</td><td>330</td><td>668</td><td>64</td><td>536</td><td>263</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td></td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390', 'linkType': 'ieee'}">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td>42%</td><td>102</td><td>43</td><td>56</td><td>5</td><td>61</td><td>28</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td></td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="https://doi.org/10.1007/s11263-016-0940-3">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>47%</td><td>121</td><td>57</td><td>63</td><td>10</td><td>84</td><td>29</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf">[pdf]</a></td><td></td><td>edu</td><td>California Institute of Technology</td><td>34.13710185</td><td>-118.12527487</td><td>25%</td><td>8</td><td>2</td><td>6</td><td>0</td><td>5</td><td>1</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td></td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>38%</td><td>168</td><td>63</td><td>105</td><td>15</td><td>95</td><td>55</td></tr><tr><td>1ea8085fe1c79d12adffb02bd157b54d799568e4</td><td></td><td><a href="papers/1ea8085fe1c79d12adffb02bd157b54d799568e4.html">Eigenfaces vs. 
Fisherfaces: Recognition Using Class Speciic Linear Projection</a></td><td><a href="http://pdfs.semanticscholar.org/1ea8/085fe1c79d12adffb02bd157b54d799568e4.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>583</td><td>162</td><td>420</td><td>44</td><td>300</td><td>162</td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td></td><td><a href="papers/447d8893a4bdc29fa1214e53499ffe67b28a6db5.html">Electronic Transport in Quantum Confined Systems</a></td><td><a href="http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html">FDDB: A Benchmark for Face Detection in Unconstrained Settings</a></td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td></td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html">Face detection, pose estimation, and landmark localization in the wild</a></td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of California, Irvine</td><td>33.64319010</td><td>-117.84016494</td><td>48%</td><td>999</td><td>475</td><td>524</td><td>52</td><td>601</td><td>303</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td></td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html">Face recognition in unconstrained videos with matched background similarity</a></td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Open University of Israel</td><td>32.77824165</td><td>34.99565673</td><td>49%</td><td>457</td><td>225</td><td>230</td><td>27</td><td>273</td><td>149</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td></td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html">Face swapping: automatically replacing faces in photographs</a></td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td>ACM Trans. 
Graph.</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>42%</td><td>212</td><td>89</td><td>123</td><td>13</td><td>137</td><td>52</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td></td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html">Fine-grained evaluation on face detection in the wild</a></td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Chinese Academy of Sciences</td><td>40.00447950</td><td>116.37023800</td><td>59%</td><td>17</td><td>10</td><td>7</td><td>0</td><td>13</td><td>4</td></tr><tr><td>04661729f0ff6afe4b4d6223f18d0da1d479accf</td><td>CelebA</td><td><a href="papers/04661729f0ff6afe4b4d6223f18d0da1d479accf.html">From Facial Parts Responses to Face Detection: A Deep Learning Approach</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.419">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>50%</td><td>150</td><td>75</td><td>74</td><td>12</td><td>93</td><td>48</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>Yale Face Database B</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. 
Intell.</td><td></td><td></td><td></td><td></td><td>40%</td><td>999</td><td>397</td><td>601</td><td>65</td><td>519</td><td>330</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td></td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf">[pdf]</a></td><td></td><td>edu</td><td>Tohoku University</td><td>38.25309450</td><td>140.87365930</td><td>48%</td><td>85</td><td>41</td><td>44</td><td>3</td><td>55</td><td>19</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td></td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td></td><td></td><td></td><td></td><td>64%</td><td>14</td><td>9</td><td>5</td><td>0</td><td>10</td><td>4</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html">Interactive Facial Feature Localization</a></td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>47%</td><td>323</td><td>153</td><td>169</td><td>23</td><td>198</td><td>105</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td></td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html">International Conference on Automatic Face and Gesture Recognition The CMU Pose , Illumination , and Expression ( PIE ) Database</a></td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>42%</td><td>723</td><td>301</td><td>422</td><td>55</td><td>392</td><td>237</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td></td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html">Is the eye region more reliable than the face? 
A preliminary study of face-based recognition on a transgender dataset</a></td><td><a href="https://doi.org/10.1109/BTAS.2013.6712710">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of North Carolina Wilmington</td><td>34.23755810</td><td>-77.92701290</td><td>29%</td><td>7</td><td>2</td><td>5</td><td>1</td><td>2</td><td>4</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td></td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>45%</td><td>999</td><td>451</td><td>545</td><td>62</td><td>613</td><td>293</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td></td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html">Large Age-Gap face verification by feature injection in deep networks</a></td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>CAISA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html">Learning Face Representation from Scratch</a></td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>53%</td><td>401</td><td>213</td><td>187</td><td>28</td><td>261</td><td>125</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>MegaFace 2</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1705.00393.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>41%</td><td>27</td><td>11</td><td>16</td><td>2</td><td>22</td><td>4</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>49%</td><td>506</td><td>250</td><td>255</td><td>36</td><td>309</td><td>150</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td></td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>42%</td><td>406</td><td>172</td><td>231</td><td>22</td><td>208</td><td>161</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a 
href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>150</td><td>70</td><td>79</td><td>14</td><td>115</td><td>29</td></tr><tr><td>203009d3608bdc31ffc3991a0310b9e98b630c4d</td><td></td><td><a href="papers/203009d3608bdc31ffc3991a0310b9e98b630c4d.html">Moving faces, looking places: validation of the Amsterdam Dynamic Facial Expression Set (ADFES).</a></td><td><span class="gray">[pdf]</a></td><td>Emotion</td><td></td><td></td><td></td><td></td><td>35%</td><td>77</td><td>27</td><td>50</td><td>6</td><td>52</td><td>14</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td></td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html">Names and faces in the news</a></td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.</td><td></td><td></td><td></td><td></td><td>36%</td><td>290</td><td>104</td><td>186</td><td>19</td><td>197</td><td>45</td></tr><tr><td>3c8db0d86a6aa51b64ec09c7d25a721adcdfb7a3</td><td></td><td><a href="papers/3c8db0d86a6aa51b64ec09c7d25a721adcdfb7a3.html">Ordered trajectories for human action recognition with large number of classes</a></td><td><a href="https://doi.org/10.1016/j.imavis.2015.06.009">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td></td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html">Pruning training sets for learning of object categories</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308', 'linkType': 'ieee'}">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>58%</td><td>60</td><td>35</td><td>25</td><td>4</td><td>41</td><td>12</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>46%</td><td>208</td><td>95</td><td>110</td><td>20</td><td>144</td><td>52</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td></td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html">Recognize complex events from static images by fusing deep channels</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>53%</td><td>43</td><td>23</td><td>20</td><td>1</td><td>30</td><td>12</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td></td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html">Robust Face Landmark Estimation under Occlusion</a></td><td><a href="{'url': 
'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>53%</td><td>296</td><td>156</td><td>139</td><td>14</td><td>179</td><td>99</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.02459.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td>edu</td><td>South China University of Technology</td><td>23.05020420</td><td>113.39880323</td><td>29%</td><td>14</td><td>4</td><td>10</td><td>3</td><td>5</td><td>7</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>61%</td><td>44</td><td>27</td><td>17</td><td>2</td><td>41</td><td>2</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td></td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html">Spoofing faces using makeup: An investigative study</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>43.61581310</td><td>7.06838000</td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td></td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><a href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Michigan State University</td><td>42.71856800</td><td>-84.47791571</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>5</td><td>0</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td></td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td></td><td></td><td></td><td></td><td>37%</td><td>916</td><td>337</td><td>577</td><td>51</td><td>420</td><td>361</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>MegaFace 2</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of 
Washington</td><td>47.65432380</td><td>-122.30800894</td><td>49%</td><td>114</td><td>56</td><td>58</td><td>10</td><td>88</td><td>22</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1212.0402.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>University of Central Florida</td><td>28.59899755</td><td>-81.19712501</td><td>51%</td><td>934</td><td>476</td><td>455</td><td>65</td><td>658</td><td>230</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td></td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html">UMDFaces: An annotated face dataset for training deep networks</a></td><td><a href="http://arxiv.org/abs/1611.01484">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td>48%</td><td>29</td><td>14</td><td>15</td><td>4</td><td>20</td><td>6</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html">WIDER FACE: A Face Detection Benchmark</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.06523.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>50%</td><td>129</td><td>65</td><td>64</td><td>11</td><td>89</td><td>34</td></tr><tr><td>0b3a146c474166bba71e645452b3a8276ac05998</td><td></td><td><a href="papers/0b3a146c474166bba71e645452b3a8276ac05998.html">Whos In the Picture</a></td><td><a href="http://pdfs.semanticscholar.org/c6e5/17eb85bc6c68dff5d3fadb2d817e839c966b.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>99</td><td>34</td><td>65</td><td>6</td><td>65</td><td>23</td></tr></table></body></html> \ No newline at end of file
+<!doctype html><html><head><meta charset='utf-8'><title>All Papers</title><link rel='stylesheet' href='reports.css'></head><body><h2>All Papers</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td></td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="{'url': 'http://doi.org/10.1016/j.imavis.2016.01.002', 'linkType': 'doi'}">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>47%</td><td>103</td><td>48</td><td>55</td><td>10</td><td>65</td><td>30</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td></td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>56%</td><td>285</td><td>159</td><td>125</td><td>25</td><td>185</td><td>82</td></tr><tr><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td><td></td><td><a href="papers/c34532fe6bfbd1e6df477c9ffdbb043b77e7804d.html">A 3D Morphable Eye Region Model for Gaze Estimation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>71%</td><td>21</td><td>15</td><td>6</td><td>0</td><td>17</td><td>4</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td></td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html">A data-driven approach to cleaning large face datasets</a></td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>48%</td><td>120</td><td>57</td><td>63</td><td>5</td><td>93</td><td>24</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html">Age and Gender Estimation of Unfiltered Faces</a></td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf">[pdf]</a></td><td>IEEE Transactions on Information Forensics and 
Security</td><td></td><td></td><td></td><td></td><td>45%</td><td>155</td><td>69</td><td>86</td><td>5</td><td>80</td><td>55</td></tr><tr><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td><td></td><td><a href="papers/6dcf418c778f528b5792104760f1fbfe90c6dd6a.html">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>70%</td><td>10</td><td>7</td><td>2</td><td>1</td><td>9</td><td>0</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td></td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>55%</td><td>278</td><td>153</td><td>125</td><td>30</td><td>195</td><td>65</td></tr><tr><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td><td></td><td><a href="papers/c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8.html">Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</a></td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td>Scientific reports</td><td>edu</td><td>University College London</td><td>51.52316070</td><td>-0.12820370</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td></td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html">Attribute and simile classifiers for face verification</a></td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>53%</td><td>862</td><td>457</td><td>405</td><td>46</td><td>556</td><td>232</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td></td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>67%</td><td>43</td><td>29</td><td>14</td><td>0</td><td>16</td><td>21</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td></td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>64%</td><td>45</td><td>29</td><td>15</td><td>2</td><td>37</td><td>5</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td></td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a 
href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>50%</td><td>46</td><td>23</td><td>23</td><td>0</td><td>17</td><td>22</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf">[pdf]</a></td><td></td><td>edu</td><td>Kyushu University</td><td>33.59914655</td><td>130.22359848</td><td>39%</td><td>804</td><td>310</td><td>494</td><td>45</td><td>383</td><td>263</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf">[pdf]</a></td><td></td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>39%</td><td>999</td><td>386</td><td>612</td><td>64</td><td>536</td><td>263</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td></td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390', 'linkType': 'ieee'}">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td>44%</td><td>102</td><td>45</td><td>54</td><td>5</td><td>61</td><td>28</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td></td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="https://doi.org/10.1007/s11263-016-0940-3">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>49%</td><td>121</td><td>59</td><td>61</td><td>10</td><td>84</td><td>29</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf">[pdf]</a></td><td></td><td>edu</td><td>California Institute of Technology</td><td>34.13710185</td><td>-118.12527487</td><td>25%</td><td>8</td><td>2</td><td>6</td><td>0</td><td>5</td><td>1</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td></td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>42%</td><td>168</td><td>71</td><td>97</td><td>15</td><td>95</td><td>55</td></tr><tr><td>1ea8085fe1c79d12adffb02bd157b54d799568e4</td><td></td><td><a href="papers/1ea8085fe1c79d12adffb02bd157b54d799568e4.html">Eigenfaces vs. 
Fisherfaces: Recognition Using Class Speciic Linear Projection</a></td><td><a href="http://pdfs.semanticscholar.org/1ea8/085fe1c79d12adffb02bd157b54d799568e4.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>33%</td><td>583</td><td>194</td><td>388</td><td>44</td><td>300</td><td>162</td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td></td><td><a href="papers/447d8893a4bdc29fa1214e53499ffe67b28a6db5.html">Electronic Transport in Quantum Confined Systems</a></td><td><a href="http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html">FDDB: A Benchmark for Face Detection in Unconstrained Settings</a></td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td></td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html">Face detection, pose estimation, and landmark localization in the wild</a></td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of California, Irvine</td><td>33.64319010</td><td>-117.84016494</td><td>54%</td><td>999</td><td>541</td><td>458</td><td>52</td><td>601</td><td>303</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td></td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html">Face recognition in unconstrained videos with matched background similarity</a></td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Open University of Israel</td><td>32.77824165</td><td>34.99565673</td><td>53%</td><td>457</td><td>241</td><td>214</td><td>27</td><td>273</td><td>149</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td></td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html">Face swapping: automatically replacing faces in photographs</a></td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td>ACM Trans. 
Graph.</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>49%</td><td>212</td><td>103</td><td>109</td><td>13</td><td>137</td><td>52</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td></td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html">Fine-grained evaluation on face detection in the wild</a></td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Chinese Academy of Sciences</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>13</td><td>4</td></tr><tr><td>04661729f0ff6afe4b4d6223f18d0da1d479accf</td><td>CelebA</td><td><a href="papers/04661729f0ff6afe4b4d6223f18d0da1d479accf.html">From Facial Parts Responses to Face Detection: A Deep Learning Approach</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.419">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>57%</td><td>150</td><td>86</td><td>63</td><td>12</td><td>93</td><td>48</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>Yale Face Database B</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. 
Intell.</td><td></td><td></td><td></td><td></td><td>45%</td><td>999</td><td>445</td><td>553</td><td>65</td><td>519</td><td>330</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td></td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf">[pdf]</a></td><td></td><td>edu</td><td>Tohoku University</td><td>38.25309450</td><td>140.87365930</td><td>58%</td><td>85</td><td>49</td><td>36</td><td>3</td><td>55</td><td>19</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td></td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td></td><td></td><td></td><td></td><td>64%</td><td>14</td><td>9</td><td>5</td><td>0</td><td>10</td><td>4</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html">Interactive Facial Feature Localization</a></td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>54%</td><td>323</td><td>175</td><td>147</td><td>23</td><td>198</td><td>105</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td></td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html">International Conference on Automatic Face and Gesture Recognition The CMU Pose , Illumination , and Expression ( PIE ) Database</a></td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>723</td><td>331</td><td>392</td><td>55</td><td>392</td><td>237</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td></td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html">Is the eye region more reliable than the face? 
A preliminary study of face-based recognition on a transgender dataset</a></td><td><a href="https://doi.org/10.1109/BTAS.2013.6712710">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of North Carolina Wilmington</td><td>34.23755810</td><td>-77.92701290</td><td>57%</td><td>7</td><td>4</td><td>3</td><td>1</td><td>2</td><td>4</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td></td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>999</td><td>496</td><td>500</td><td>62</td><td>613</td><td>293</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td></td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html">Large Age-Gap face verification by feature injection in deep networks</a></td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>CAISA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html">Learning Face Representation from Scratch</a></td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>58%</td><td>401</td><td>232</td><td>168</td><td>28</td><td>261</td><td>125</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>MegaFace 2</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1705.00393.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>41%</td><td>27</td><td>11</td><td>16</td><td>2</td><td>22</td><td>4</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>54%</td><td>506</td><td>273</td><td>232</td><td>36</td><td>309</td><td>150</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td></td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>47%</td><td>406</td><td>191</td><td>212</td><td>22</td><td>208</td><td>161</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a 
href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>49%</td><td>150</td><td>73</td><td>76</td><td>14</td><td>115</td><td>29</td></tr><tr><td>203009d3608bdc31ffc3991a0310b9e98b630c4d</td><td></td><td><a href="papers/203009d3608bdc31ffc3991a0310b9e98b630c4d.html">Moving faces, looking places: validation of the Amsterdam Dynamic Facial Expression Set (ADFES).</a></td><td><span class="gray">[pdf]</a></td><td>Emotion</td><td></td><td></td><td></td><td></td><td>39%</td><td>77</td><td>30</td><td>47</td><td>6</td><td>52</td><td>14</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td></td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html">Names and faces in the news</a></td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.</td><td></td><td></td><td></td><td></td><td>41%</td><td>290</td><td>118</td><td>172</td><td>19</td><td>197</td><td>45</td></tr><tr><td>3c8db0d86a6aa51b64ec09c7d25a721adcdfb7a3</td><td></td><td><a href="papers/3c8db0d86a6aa51b64ec09c7d25a721adcdfb7a3.html">Ordered trajectories for human action recognition with large number of classes</a></td><td><a href="https://doi.org/10.1016/j.imavis.2015.06.009">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>100%</td><td>1</td><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td></td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html">Pruning training sets for learning of object categories</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308', 'linkType': 'ieee'}">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>58%</td><td>60</td><td>35</td><td>25</td><td>4</td><td>41</td><td>12</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>47%</td><td>208</td><td>98</td><td>107</td><td>20</td><td>144</td><td>52</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td></td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html">Recognize complex events from static images by fusing deep channels</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>60%</td><td>43</td><td>26</td><td>17</td><td>1</td><td>30</td><td>12</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td></td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html">Robust Face Landmark Estimation under Occlusion</a></td><td><a href="{'url': 
'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>56%</td><td>296</td><td>167</td><td>128</td><td>14</td><td>179</td><td>99</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.02459.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td>edu</td><td>South China University of Technology</td><td>23.05020420</td><td>113.39880323</td><td>43%</td><td>14</td><td>6</td><td>8</td><td>3</td><td>5</td><td>7</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>64%</td><td>44</td><td>28</td><td>16</td><td>2</td><td>41</td><td>2</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td></td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html">Spoofing faces using makeup: An investigative study</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>43.61581310</td><td>7.06838000</td><td>33%</td><td>3</td><td>1</td><td>2</td><td>0</td><td>1</td><td>2</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td></td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><a href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Michigan State University</td><td>42.71856800</td><td>-84.47791571</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>5</td><td>0</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td></td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td></td><td></td><td></td><td></td><td>44%</td><td>916</td><td>403</td><td>511</td><td>51</td><td>420</td><td>361</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>MegaFace 2</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of 
Washington</td><td>47.65432380</td><td>-122.30800894</td><td>53%</td><td>114</td><td>60</td><td>54</td><td>10</td><td>88</td><td>22</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1212.0402.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>University of Central Florida</td><td>28.59899755</td><td>-81.19712501</td><td>57%</td><td>934</td><td>531</td><td>400</td><td>65</td><td>658</td><td>230</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td></td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html">UMDFaces: An annotated face dataset for training deep networks</a></td><td><a href="http://arxiv.org/abs/1611.01484">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td>48%</td><td>29</td><td>14</td><td>15</td><td>4</td><td>20</td><td>6</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html">WIDER FACE: A Face Detection Benchmark</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.06523.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>54%</td><td>129</td><td>70</td><td>59</td><td>11</td><td>89</td><td>34</td></tr><tr><td>0b3a146c474166bba71e645452b3a8276ac05998</td><td></td><td><a href="papers/0b3a146c474166bba71e645452b3a8276ac05998.html">Whos In the Picture</a></td><td><a href="http://pdfs.semanticscholar.org/c6e5/17eb85bc6c68dff5d3fadb2d817e839c966b.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>39%</td><td>99</td><td>39</td><td>60</td><td>6</td><td>65</td><td>23</td></tr></table></body></html> \ No newline at end of file