| author | Jules Laplace <julescarbon@gmail.com> | 2018-11-10 15:59:24 +0100 |
|---|---|---|
| committer | Jules Laplace <julescarbon@gmail.com> | 2018-11-10 15:59:24 +0100 |
| commit | e8ce7876c5869522f982073d70c3ee7be179e1f9 | |
| tree | 367d30870781187f4f78eb074cb7cb0b632aa0c7 | |
| parent | c412e5f0f8b71d137e4f18f8a8c7361e15c8f500 | |
citation coverage reports
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | reports/misc/all_doi-1.csv (renamed from all_doi-1.csv) | 0 |
| -rw-r--r-- | reports/misc/all_doi-2.csv (renamed from all_doi-2.csv) | 0 |
| -rw-r--r-- | reports/misc/all_doi-3.csv (renamed from all_doi-3.csv) | 0 |
| -rw-r--r-- | reports/misc/all_doi-4.csv (renamed from all_doi-4.csv) | 0 |
| -rw-r--r-- | reports/misc/all_doi.csv (renamed from all_doi.csv) | 0 |
| -rw-r--r-- | reports/misc/db_paper_doi.csv (renamed from db_paper_doi.csv) | 0 |
| -rw-r--r-- | reports/misc/db_paper_pdf-1.csv (renamed from db_paper_pdf-1.csv) | 0 |
| -rw-r--r-- | reports/misc/db_paper_pdf-2.csv (renamed from db_paper_pdf-2.csv) | 0 |
| -rw-r--r-- | reports/misc/db_paper_pdf-3.csv (renamed from db_paper_pdf-3.csv) | 0 |
| -rw-r--r-- | reports/misc/db_paper_pdf.csv (renamed from db_paper_pdf.csv) | 0 |
| -rw-r--r-- | reports/misc/db_paper_pdf_list.csv (renamed from db_paper_pdf_list.csv) | 0 |
| -rw-r--r-- | reports/misc/missing-1.csv (renamed from missing-1.csv) | 0 |
| -rw-r--r-- | reports/misc/missing-2.csv (renamed from missing-2.csv) | 0 |
| -rw-r--r-- | reports/misc/missing-3.csv (renamed from missing-3.csv) | 0 |
| -rw-r--r-- | reports/misc/missing.csv (renamed from missing.csv) | 0 |
| -rw-r--r-- | reports/misc/raw_paper_doi.csv (renamed from raw_paper_doi.csv) | 0 |
| -rw-r--r-- | reports/misc/raw_paper_pdf.csv (renamed from raw_paper_pdf.csv) | 0 |
| -rw-r--r-- | reports/misc/raw_paper_pdf_list.csv (renamed from raw_paper_pdf_list.csv) | 0 |
| -rw-r--r-- | reports/report_coverage.html | 1 |
| -rw-r--r-- | reports/report_index.html | 1 |
| -rw-r--r-- | reports/reports.css | 3 |
| -rw-r--r-- | s2-citation-report.py | 167 |
| -rw-r--r-- | util.py | 20 |
23 files changed, 158 insertions, 34 deletions
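Most of the insertions land in s2-citation-report.py, which generates the per-paper rows seen in the two new HTML reports below. The script itself is not part of this excerpt, so the following is only a minimal sketch of what the Coverage column appears to encode (geocoded citations as a share of total citations, e.g. 475 of 999 rounds to 48%); the function name, arguments, and rounding are assumptions, not taken from the actual code:

```python
# Hedged sketch only: s2-citation-report.py is not shown in this diff,
# so the names and rounding below are assumptions.
def coverage_percent(geocoded_citations: int, total_citations: int) -> str:
    """Coverage as rendered in report_coverage.html, e.g. 475/999 -> '48%'."""
    if total_citations == 0:
        return "0%"
    return f"{round(100 * geocoded_citations / total_citations)}%"
```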
diff --git a/all_doi-1.csv b/reports/misc/all_doi-1.csv
index 16e74d90..16e74d90 100644
--- a/all_doi-1.csv
+++ b/reports/misc/all_doi-1.csv

diff --git a/all_doi-2.csv b/reports/misc/all_doi-2.csv
index d798b9ad..d798b9ad 100644
--- a/all_doi-2.csv
+++ b/reports/misc/all_doi-2.csv

diff --git a/all_doi-3.csv b/reports/misc/all_doi-3.csv
index 9517ab77..9517ab77 100644
--- a/all_doi-3.csv
+++ b/reports/misc/all_doi-3.csv

diff --git a/all_doi-4.csv b/reports/misc/all_doi-4.csv
index 81bb4df3..81bb4df3 100644
--- a/all_doi-4.csv
+++ b/reports/misc/all_doi-4.csv

diff --git a/all_doi.csv b/reports/misc/all_doi.csv
index 83caf30a..83caf30a 100644
--- a/all_doi.csv
+++ b/reports/misc/all_doi.csv

diff --git a/db_paper_doi.csv b/reports/misc/db_paper_doi.csv
index 69384d5d..69384d5d 100644
--- a/db_paper_doi.csv
+++ b/reports/misc/db_paper_doi.csv

diff --git a/db_paper_pdf-1.csv b/reports/misc/db_paper_pdf-1.csv
index 810fada9..810fada9 100644
--- a/db_paper_pdf-1.csv
+++ b/reports/misc/db_paper_pdf-1.csv

diff --git a/db_paper_pdf-2.csv b/reports/misc/db_paper_pdf-2.csv
index 0adc7ca6..0adc7ca6 100644
--- a/db_paper_pdf-2.csv
+++ b/reports/misc/db_paper_pdf-2.csv

diff --git a/db_paper_pdf-3.csv b/reports/misc/db_paper_pdf-3.csv
index 93605c7b..93605c7b 100644
--- a/db_paper_pdf-3.csv
+++ b/reports/misc/db_paper_pdf-3.csv

diff --git a/db_paper_pdf.csv b/reports/misc/db_paper_pdf.csv
index 5547d808..5547d808 100644
--- a/db_paper_pdf.csv
+++ b/reports/misc/db_paper_pdf.csv

diff --git a/db_paper_pdf_list.csv b/reports/misc/db_paper_pdf_list.csv
index e8a675d3..e8a675d3 100644
--- a/db_paper_pdf_list.csv
+++ b/reports/misc/db_paper_pdf_list.csv

diff --git a/missing-1.csv b/reports/misc/missing-1.csv
index ade27ed5..ade27ed5 100644
--- a/missing-1.csv
+++ b/reports/misc/missing-1.csv

diff --git a/missing-2.csv b/reports/misc/missing-2.csv
index c3182fe7..c3182fe7 100644
--- a/missing-2.csv
+++ b/reports/misc/missing-2.csv

diff --git a/missing-3.csv b/reports/misc/missing-3.csv
index a11c16e4..a11c16e4 100644
--- a/missing-3.csv
+++ b/reports/misc/missing-3.csv

diff --git a/missing.csv b/reports/misc/missing.csv
index 62bee46d..62bee46d 100644
--- a/missing.csv
+++ b/reports/misc/missing.csv

diff --git a/raw_paper_doi.csv b/reports/misc/raw_paper_doi.csv
index bd56e667..bd56e667 100644
--- a/raw_paper_doi.csv
+++ b/reports/misc/raw_paper_doi.csv

diff --git a/raw_paper_pdf.csv b/reports/misc/raw_paper_pdf.csv
index c9827c27..c9827c27 100644
--- a/raw_paper_pdf.csv
+++ b/reports/misc/raw_paper_pdf.csv

diff --git a/raw_paper_pdf_list.csv b/reports/misc/raw_paper_pdf_list.csv
index b78a3755..b78a3755 100644
--- a/raw_paper_pdf_list.csv
+++ b/reports/misc/raw_paper_pdf_list.csv

diff --git a/reports/report_coverage.html b/reports/report_coverage.html
new file mode 100644
index 00000000..d221536e
--- /dev/null
+++ b/reports/report_coverage.html
@@ -0,0 +1 @@
+<!doctype html> … [single-line minified HTML: the "Coverage" report, one table row per paper with columns Paper ID, Megapixels Key, Report Link, PDF Link, Journal, Type, Address, Lat, Lng, Coverage, Total Citations, Geocoded Citations, Unknown Citations, Empty Citations, With PDF, With DOI; rows ordered by geocoded-citation count, highest first]
\ No newline at end of file

diff --git a/reports/report_index.html b/reports/report_index.html
new file mode 100644
index 00000000..5db8e7ab
--- /dev/null
+++ b/reports/report_index.html
@@ -0,0 +1 @@
+<!doctype html> … [single-line minified HTML: the "All Papers" index, the same per-paper table with rows ordered by Paper ID]
\ No newline at end of file
diff --git a/reports/reports.css b/reports/reports.css
index b3441f81..1fb2afaf 100644
--- a/reports/reports.css
+++ b/reports/reports.css
@@ -7,4 +7,5 @@ td {
 #mapid {
   width: 100vw;
   height: 30vw;
-}
\ No newline at end of file
+}
+.gray { color: #888; }
\ No newline at end of file
diff --git a/s2-citation-report.py b/s2-citation-report.py
index 58b7ed8f..26e148fe 100644
--- a/s2-citation-report.py
+++ b/s2-citation-report.py
@@ -10,16 +10,83 @@ from util import *
 @click.command()
 def s2_citation_report():
     addresses = AddressBook()
+    megapixels = load_megapixels_queries()
+    papers = []
     for fn in glob.iglob('datasets/s2/papers/**/*.json', recursive=True):
-        process_paper(fn, addresses)
+        paper_data = process_paper(fn, addresses, megapixels)
+        papers.append(paper_data)
+    write_papers_report('reports/report_index.html', 'All Papers', papers, 'paperId')
+    write_papers_report('reports/report_coverage.html', 'Coverage', papers, 'citations_geocoded', reverse=True)
+
+def write_papers_report(fn, title, papers, key, reverse=False):
+    sorted_papers = []
+    for paper in sorted(papers, key=lambda x: x[key], reverse=reverse):
+        sorted_papers.append([
+            paper['paperId'],
+            paper['key'],
+            LinkLine(paper['report_link'], paper['title']),
+            LinkLine(paper['pdf_link'], '[pdf]'),
+            paper['journal'],
+            paper['address_type'],
+            paper['address'],
+            paper['lat'],
+            paper['lng'],
+            str(percent(paper['citation_count'], paper['citations_geocoded'])) + '%',
+            paper['citation_count'],
+            paper['citations_geocoded'],
+            paper['citations_unknown'],
+            paper['citations_empty'],
+            paper['citations_pdf'],
+            paper['citations_doi'],
+        ])
+    sorted_paper_keys = [
+        'Paper ID',
+        'Megapixels Key',
+        'Report Link',
+        'PDF Link',
+        'Journal',
+        'Type',
+        'Address',
+        'Lat',
+        'Lng',
+        'Coverage',
+        'Total Citations',
+        'Geocoded Citations',
+        'Unknown Citations',
+        'Empty Citations',
+        'With PDF',
+        'With DOI',
+    ]
+    write_report(fn, title=title, keys=sorted_paper_keys, rows=sorted_papers)
+
+def process_paper(fn, addresses, megapixels):
+    res = {
+        'paperId': '',
+        'key': '',
+        'title': '',
+        'journal': '',
+        'address': '',
+        'address_type': '',
+        'lat': '',
+        'lng': '',
+        'pdf_link': '',
+        'report_link': '',
+        'citation_count': 0,
+        'citations_geocoded': 0,
+        'citations_unknown': 0,
+        'citations_empty': 0,
+        'citations_pdf': 0,
+        'citations_doi': 0,
+    }
 
-def process_paper(fn, addresses):
-    res = {}
-    address_count = 0
-    geocode_count = 0
     geocoded_citations = []
     unknown_citations = []
     display_geocoded_citations = []
+    empty_citations = []
+    pdf_count = 0
+    doi_count = 0
+    address_count = 0
+
     with open(fn, 'r') as f:
         data = json.load(f)
     print('>> {}'.format(data['paperId']))
@@ -27,14 +94,42 @@ def process_paper(fn, addresses):
     if paper.data is None:
         print("Paper missing! {}".format(data['paperId']))
         return
+    res['paperId'] = paper.paper_id
     res['title'] = paper.title
     res['journal'] = paper.journal
-    res['authors'] = paper.authors
-    res['citations'] = []
+    res['report_link'] = 'papers/{}.html'.format(paper.paper_id)
+    res['pdf_link'] = paper.pdf_link
+    # res['authors'] = ', '.join(paper.authors)
+    # res['citations'] = []
+
+    if res['title'] in megapixels:
+        res['key'] = megapixels[res['title']]['Database Name']
+
+    paper_institutions = load_institutions(paper.paper_id)
+    paper_address = None
+    for inst in sorted(paper_institutions, key=operator.itemgetter(1)):
+        # print(inst[1])
+        institution = inst[1]
+        if paper_address is None:
+            paper_address = addresses.find(institution)
+
+    if paper_address:
+        print(paper_address)
+        res['address'] = paper_address[0]
+        res['lat'] = paper_address[3]
+        res['lng'] = paper_address[4]
+        res['address_type'] = paper_address[5]
+
 
     for cite in data['citations']:
         citationId = cite['paperId']
         citation = load_paper(citationId)
+        has_pdf = os.path.exists(file_path('pdf', citationId, 'paper.txt'))
+        has_doi = os.path.exists(file_path('doi', citationId, 'paper.doi'))
+        if has_pdf:
+            pdf_count += 1
+        if has_doi:
+            doi_count += 1
         if citation.data is None:
             print("Citation missing! {}".format(cite['paperId']))
             continue
@@ -50,12 +145,11 @@ def process_paper(fn, addresses):
             next_address = addresses.find(institution)
             if next_address:
                 address = next_address
-                geocode_count += 1
                 geocoded_institutions.append(institution)
             else:
                 unknown_institutions.append(institution)
         if not address:
-            if os.path.exists(file_path('pdf', citationId, 'paper.txt')):
+            if has_pdf:
                 headings, found_abstract = read_headings(file_path('pdf', citationId, 'paper.txt'), citation)
                 heading_string = '\n'.join(headings[0:20])
                 found_addresses = []
@@ -66,18 +160,22 @@ def process_paper(fn, addresses):
                     next_address = addresses.find(l)
                     if next_address:
                         address = next_address
-                        geocode_count += 1
                         geocoded_institutions.append(heading)
                     else:
                         unknown_institutions.append(heading)
+            else:
+                empty_citations.append([
+                    citationId,
+                    citation.title,
+                ])
 
-        res['citations'].append({
-            'title': citation.title,
-            'journal': citation.journal,
-            'authors': citation.authors,
-            'institutions': [inst[1] for inst in institutions],
-            'geocoded': geocoded_institutions,
-        })
+        # res['citations'].append({
+        #     'title': citation.title,
+        #     'journal': citation.journal,
+        #     'authors': citation.authors,
+        #     'institutions': [inst[1] for inst in institutions],
+        #     'geocoded': geocoded_institutions,
+        # })
         if address:
             geocoded_citations.append([
                 citation.title,
@@ -88,20 +186,16 @@ def process_paper(fn, addresses):
             ] + address)
         else:
             unknown_citations.append([
+                # citationId,
                 citation.title,
                 '<br>'.join(unknown_institutions),
             ])
-
-    paper_institutions = load_institutions(paper.paper_id)
-    paper_address = None
-    for inst in sorted(paper_institutions, key=operator.itemgetter(1)):
-        # print(inst[1])
-        address_count += 1
-        institution = inst[1]
-        paper_address = addresses.find(institution)
-
-    if paper_address:
-        print(paper_address)
+    res['citation_count'] = len(data['citations'])
+    res['citations_geocoded'] = len(geocoded_citations)
+    res['citations_unknown'] = len(unknown_citations)
+    res['citations_empty'] = len(empty_citations)
+    res['citations_pdf'] = pdf_count
+    res['citations_doi'] = doi_count
 
     total_citations = len(geocoded_citations) + len(unknown_citations)
     os.makedirs('reports/papers/', exist_ok=True)
@@ -122,9 +216,10 @@ def process_paper(fn, addresses):
         f.write('<li>Journal: {}</li>'.format(paper.journal))
         if paper_address:
             f.write('<li>Research institution: {}</li>'.format(paper_address[0]))
-            f.write('<li>Address: {}</li>'.format(paper_address[3]))
-        f.write('<li>{}</li>'.format(paper.year))
-        f.write('<li>{} / {} citations were located ({} %).</li>'.format(len(geocoded_citations), total_citations, math.floor(len(geocoded_citations) / total_citations * 100)))
+            f.write('<li>Address: {}</li>'.format(paper_address[2]))
+            f.write('<li>Lat/Lng: {}, {}</li>'.format(paper_address[3], paper_address[4]))
+        f.write('<li>Year: {}</li>'.format(paper.year))
+        f.write('<li>Coverage: {} / {} citations were located ({} %).</li>'.format(len(geocoded_citations), total_citations, math.floor(len(geocoded_citations) / total_citations * 100)))
         f.write('</ul>')
         f.write('<h3>{}</h3>'.format('Geocoded Citations'))
         write_table(f, keys=None, rows=sorted(display_geocoded_citations, key=operator.itemgetter(0)))
@@ -145,6 +240,16 @@ def process_paper(fn, addresses):
     f.write("</html>")
     return res
 
+def load_megapixels_queries():
+    keys, rows = read_csv('datasets/citations-2018310.csv')
+    lookup = {}
+    for row in rows:
+        rec = {}
+        for index, key in enumerate(keys):
+            rec[key] = row[index]
+        lookup[rec['Title'].strip()] = rec
+    return lookup
+
 def load_institutions(paperId):
     if os.path.exists(file_path('pdf', paperId, 'institutions.json')):
         return read_json(file_path('pdf', paperId, 'institutions.json'))['institutions']
diff --git a/util.py b/util.py
--- a/util.py
+++ b/util.py
@@ -87,6 +87,9 @@ def write_report(fn, title=None, keys=None, rows=[]):
     f.write("</html>")
     print("{} {}".format(fn, count))
 
+def percent(m, n):
+    return round(n / m * 100)
+
 class NameLine(object):
     def __init__(self, s):
         self.s = s.strip()
@@ -102,9 +105,12 @@ class BoldLine(object):
 class LinkLine(object):
     def __init__(self, href, txt):
         self.href = href
-        self.txt = txt.strip
+        self.txt = txt.strip()
     def __str__(self):
-        return '<a href="{}">{}</a>'.format(self.href, self.txt)
+        if self.href:
+            return '<a href="{}">{}</a>'.format(self.href, self.txt)
+        else:
+            return '<span class="gray">{}</a>'.format(self.txt)
 
 def write_table(f, keys, rows):
     count = 0
@@ -145,6 +151,13 @@ class DbPaper(object):
     @property
     def authors(self):
         return [ (author['ids'][0] if len(author['ids']) else '', author['name']) for author in self.data['authors'] ]
+    @property
+    def pdf_link(self):
+        if self.data['s2PdfUrl']:
+            return self.data['s2PdfUrl']
+        if len(self.data['pdfUrls']):
+            return self.data['pdfUrls'][0]
+        return None
 
     def record(self):
         return [ self.paper_id, self.title, self.journal, self.year ]
@@ -173,6 +186,9 @@ class RawPaper(object):
     @property
     def authors(self):
         return [ (author[0]['ids'][0] if len(author[0]['ids']) else '', author[0]['name']) for author in self.data['authors'] ]
+    @property
+    def pdf_link(self):
+        return self.data['primaryPaperLink'] or None
 
     def record(self):
         return [ self.paper_id, self.title, self.journal, self.year ]
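The Coverage column in report_coverage.html is the share of a paper's citations that could be geocoded, produced by the new percent() helper as round(geocoded / total * 100). Below is a minimal sketch of that calculation with a guard for papers that have no citations at all (the committed helper divides by its first argument unconditionally); safe_percent and the zero guard are illustrative assumptions, not part of this commit.

```python
# Sketch of the coverage figure shown in report_coverage.html.
# safe_percent is a hypothetical name; the committed helper is percent(m, n).
def safe_percent(total, part):
    if not total:
        return 0  # avoid ZeroDivisionError for papers with zero citations
    return round(part / total * 100)

# WIDER FACE row above: 65 geocoded out of 129 citations -> 50
assert safe_percent(129, 65) == 50
```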

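When a paper has no usable PDF link, LinkLine falls back to a gray placeholder styled by the .gray rule added to reports.css. As committed, that fallback closes the opening <span> with </a>, which is visible in the Indian Movie Face Database row of the coverage table above. A hedged sketch of a well-formed fallback follows, assuming the same .gray class; this is a suggested correction, not the committed code.

```python
# Sketch of a LinkLine whose no-href branch emits matching tags.
class LinkLine(object):
    def __init__(self, href, txt):
        self.href = href
        self.txt = txt.strip()

    def __str__(self):
        if self.href:
            return '<a href="{}">{}</a>'.format(self.href, self.txt)
        # gray placeholder when no PDF/report URL is available
        return '<span class="gray">{}</span>'.format(self.txt)

print(LinkLine(None, '[pdf]'))  # -> <span class="gray">[pdf]</span>
```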