-rw-r--r--  client/index.js | 55
-rw-r--r--  client/tables.js | 43
-rw-r--r--  megapixels/app/site/parser.py | 2
-rw-r--r--  scraper/reports/report_coverage.html | 2
-rw-r--r--  scraper/reports/report_index.html | 2
-rw-r--r--  scraper/s2-citation-report.py | 67
-rw-r--r--  scraper/s2-search.py | 20
-rw-r--r--  scraper/util.py | 2
-rw-r--r--  site/datasets/citations/22f656d0f8426c84a33a267977f511f127bfd7f3.json | 2
-rw-r--r--  site/datasets/citations/27a2fad58dd8727e280f97036e0d2bc55ef5424c.json | 2
-rw-r--r--  site/datasets/citations/37d6f0eb074d207b53885bd2eb78ccc8a04be597.json | 2
-rw-r--r--  site/datasets/citations/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.json | 2
-rw-r--r--  site/datasets/citations/datasets.csv | 241
-rw-r--r--  site/public/index.html | 2
-rw-r--r--  site/public/test/citations/index.html | 1
-rw-r--r--  site/public/test/csv/index.html | 2
-rw-r--r--  site/public/test/datasets/index.html | 3
-rw-r--r--  site/public/test/face_search/index.html | 1
-rw-r--r--  site/public/test/gallery/index.html | 1
-rw-r--r--  site/public/test/map/index.html | 4
-rw-r--r--  site/public/test/name_search/index.html | 4
-rw-r--r--  site/public/test/style/index.html | 1
22 files changed, 381 insertions, 80 deletions
diff --git a/client/index.js b/client/index.js
index a8783522..27110a18 100644
--- a/client/index.js
+++ b/client/index.js
@@ -2,13 +2,11 @@ import React from 'react'
import ReactDOM from 'react-dom'
import { AppContainer } from 'react-hot-loader'
import { Provider } from 'react-redux'
-import Tabulator from 'tabulator-tables'
-import csv from 'parse-csv'
-// import parse from 'csv-parse'
import { toArray } from './util'
import Applet from './applet'
import { store } from './store'
+import appendTable from './tables'
function appendReactApplet(el, payload) {
ReactDOM.render(
@@ -20,55 +18,6 @@ function appendReactApplet(el, payload) {
)
}
-function appendTabulator(el, payload) {
- const table = new Tabulator(el, {
- height: '311px',
- layout: 'fitDataFill',
- placeholder: 'No Data Set',
- columns: payload.fields.split(', ').map(field => {
- switch (field) {
- default:
- return { title: field, field: field.toLowerCase(), sorter: 'string' }
- }
- }),
- // {title:'Name', field:'name', sorter:'string', width:200},
- // {title:'Progress', field:'progress', sorter:'number', formatter:'progress'},
- // {title:'Gender', field:'gender', sorter:"string"},
- // {title:"Rating", field:"rating", formatter:"star", align:"center", width:100},
- // {title:"Favourite Color", field:"col", sorter:"string", sortable:false},
- // {title:"Date Of Birth", field:"dob", sorter:"date", align:"center"},
- // {title:"Driver", field:"car", align:"center", formatter:"tickCross", sorter:"boolean"},
- })
- let path = payload.opt
- // let columns = payload.fields.split(',').map(s => s.trim())
- // console.log(path, columns)
- fetch('https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_names_gender_kg_min.csv', { mode: 'cors' })
- .then(r => r.text())
- .then(text => {
- const data = csv.toJSON(text, { headers: { included: true } })
- console.log(data)
- table.setData(data)
- // const parser = parse()
- // console.log(parser)
- // parser.on('readable', () => {
- // let record
- // let output = []
- // do {
- // record = parser.read()
- // if (record) output.push(record)
- // } while (record)
- // output.shift()
- // table.setData(output)
- // })
- // parser.on('error', err => {
- // console.error(err.message)
- // })
- // parser.write(data)
- // parser.end()
- // table.setData(path, null, xs{ method: 'get', mode: 'cors', cache: 'no-cache' })
- })
-}
-
function appendApplets() {
toArray(document.querySelectorAll('.applet')).forEach(el => {
console.log(el.dataset.payload)
@@ -81,7 +30,7 @@ function appendApplets() {
console.log(payload)
switch (payload.command) {
case 'load file':
- appendTabulator(el, payload)
+ appendTable(el, payload)
break
default:
appendReactApplet(el, payload)
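
For context, appendApplets() reads each '.applet' element's data-payload, parses it, and dispatches on payload.command: a 'load file' payload now goes to the new appendTable module, everything else is mounted as a React applet. A minimal sketch of such a payload, assuming hypothetical field names (the CSV URL is the one that was hard-coded in the removed appendTabulator above):

// Hypothetical payload for a '.applet' element, reconstructed from the dispatch
// in appendApplets(); the field names are assumptions.
const payload = {
  command: 'load file',
  opt: 'https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_names_gender_kg_min.csv',
  fields: 'Name, Images, Gender',
}
// Because command === 'load file', appendApplets() hands this to
// appendTable(el, payload) instead of calling appendReactApplet(el, payload).
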
diff --git a/client/tables.js b/client/tables.js
new file mode 100644
index 00000000..6b00bbde
--- /dev/null
+++ b/client/tables.js
@@ -0,0 +1,43 @@
+import Tabulator from 'tabulator-tables'
+import csv from 'parse-csv'
+
+const datasetColumns = [
+ { title: 'Title', field: 'title', sorter: 'string' },
+ { title: 'Images', field: 'images', sorter: 'number' },
+ { title: 'People', field: 'people', sorter: 'number' },
+ { title: 'Year', field: 'year', sorter: 'number' },
+ { title: 'Citations', field: 'citations', sorter: 'number' },
+ { title: 'Influenced', field: 'influenced', sorter: 'number' },
+ // { title: 'Origin', field: 'origin', sorter: 'string' },
+]
+
+function getColumns(payload) {
+ if (payload.opt.match('datasets.csv')) {
+ return datasetColumns
+ }
+ return (payload.fields || '').split(', ').map(field => {
+ switch (field) {
+ default:
+ return { title: field, field: field.toLowerCase(), sorter: 'string' }
+ }
+ })
+}
+
+export default function append(el, payload) {
+ const columns = getColumns(payload)
+ const table = new Tabulator(el, {
+ height: '311px',
+ layout: 'fitDataFill',
+ placeholder: 'No Data Set',
+ columns,
+ })
+ // let path = payload.opt
+ // console.log(path, columns)
+ fetch(payload.opt, { mode: 'cors' })
+ .then(r => r.text())
+ .then(text => {
+ const data = csv.toJSON(text, { headers: { included: true } })
+ console.log(data)
+ table.setData(data)
+ })
+}
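
A minimal usage sketch of the new module, assuming the element selectors and paths below are representative: a payload whose opt contains 'datasets.csv' gets the curated datasetColumns, while any other CSV gets string columns derived from payload.fields.

import appendTable from './tables'

// Hypothetical usage; the selectors and paths are illustrative only.
const [datasetsEl, namesEl] = document.querySelectorAll('.applet')

// opt contains 'datasets.csv', so getColumns() returns the curated datasetColumns
// (Title, Images, People, Year, Citations, Influenced).
appendTable(datasetsEl, { opt: '/datasets/citations/datasets.csv' })

// Any other CSV: string columns are derived from the comma-separated fields list,
// e.g. 'Name' becomes { title: 'Name', field: 'name', sorter: 'string' }.
appendTable(namesEl, {
  opt: 'https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/lfw_names_gender_kg_min.csv',
  fields: 'Name, Images, Gender',
})
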
diff --git a/megapixels/app/site/parser.py b/megapixels/app/site/parser.py
index 44186be6..de3ce1ff 100644
--- a/megapixels/app/site/parser.py
+++ b/megapixels/app/site/parser.py
@@ -71,7 +71,7 @@ def format_applet(section, s3_path):
if opt:
applet['opt'] = opt
if command == 'load file':
- if opt[0] != '/':
+ if opt[0:4] != 'http':
applet['opt'] = s3_path + opt
if len(payload) > 1:
applet['fields'] = payload[1]
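
The parser change above swaps the old leading-slash test for an 'http' prefix test, so only fully qualified URLs pass through untouched and every other opt value gets the dataset's S3 path prepended. A JavaScript paraphrase of that resolution rule, as a sketch (the resolveOpt helper, the s3Path value, and the example.org URL are assumptions, not code from this repository):

// Sketch of the new resolution rule from parser.py, paraphrased in JavaScript.
const s3Path = 'https://nyc3.digitaloceanspaces.com/megapixels/v1/datasets/lfw/assets/'

// resolveOpt is a hypothetical helper mirroring: if opt[0:4] != 'http': opt = s3_path + opt
const resolveOpt = opt => (opt.slice(0, 4) !== 'http' ? s3Path + opt : opt)

resolveOpt('lfw_names_gender_kg_min.csv')      // → s3Path + 'lfw_names_gender_kg_min.csv'
resolveOpt('https://example.org/data.csv')     // → unchanged (already a full URL)
resolveOpt('/datasets/citations/datasets.csv') // → now prefixed too (the old '/' check skipped it)
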
diff --git a/scraper/reports/report_coverage.html b/scraper/reports/report_coverage.html
index 41716aaa..0f6d7c55 100644
--- a/scraper/reports/report_coverage.html
+++ b/scraper/reports/report_coverage.html
@@ -1 +1 @@
-<!doctype html><html><head><meta charset='utf-8'><title>Coverage</title><link rel='stylesheet' href='reports.css'></head><body><h2>Coverage</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1212.0402.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>University of Central Florida</td><td>28.59899755</td><td>-81.19712501</td><td>54%</td><td>999</td><td>535</td><td>464</td><td>73</td><td>708</td><td>212</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html">Face detection, pose estimation, and landmark localization in the wild</a></td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of California, Irvine</td><td>33.64319010</td><td>-117.84016494</td><td>52%</td><td>999</td><td>521</td><td>478</td><td>59</td><td>607</td><td>273</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a 
href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>put_face</td><td>Put Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html">Attribute and simile classifiers for face verification</a></td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>51%</td><td>894</td><td>454</td><td>440</td><td>55</td><td>587</td><td>222</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>42%</td><td>999</td><td>422</td><td>577</td><td>80</td><td>538</td><td>295</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td></td><td></td><td></td><td></td><td>41%</td><td>975</td><td>403</td><td>572</td><td>65</td><td>460</td><td>345</td></tr><tr><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td><td>a_pascal_yahoo</td><td>aPascal</td><td><a href="papers/2e384f057211426ac5922f1b33d2aa8df5d51f57.html">Describing objects by their attributes</a></td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>727</td><td>73</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html">Deep Face Recognition</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>621</td><td>156</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a 
href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf">[pdf]</a></td><td></td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>38%</td><td>999</td><td>380</td><td>619</td><td>75</td><td>555</td><td>252</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html">VQA: Visual Question Answering</a></td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>47%</td><td>731</td><td>344</td><td>387</td><td>47</td><td>628</td><td>4</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html">Deep Learning Face Attributes in the Wild</a></td><td><a href="http://arxiv.org/pdf/1411.7766v2.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>42%</td><td>808</td><td>340</td><td>468</td><td>69</td><td>666</td><td>50</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html">International Conference on Automatic Face and Gesture Recognition The CMU Pose , Illumination , and Expression ( PIE ) Database</a></td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>742</td><td>330</td><td>412</td><td>61</td><td>410</td><td>232</td></tr><tr><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td><td>voc</td><td>VOC</td><td><a href="papers/abe9f3b91fd26fa1b50cd685c0d20debfb372f73.html">The Pascal Visual Object Classes Challenge: A Retrospective</a></td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>999</td><td>315</td><td>684</td><td>75</td><td>698</td><td>6</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf">[pdf]</a></td><td></td><td>edu</td><td>Kyushu University</td><td>33.59914655</td><td>130.22359848</td><td>36%</td><td>848</td><td>308</td><td>540</td><td>56</td><td>413</td><td>255</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html">Nonparametric Object and Scene Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>999</td><td>305</td><td>694</td><td>93</td><td>670</td><td>9</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfw_p</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><a 
href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>53%</td><td>521</td><td>274</td><td>247</td><td>40</td><td>321</td><td>144</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html">Overview of the face recognition grand challenge</a></td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>253</td><td>746</td><td>110</td><td>572</td><td>64</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>251</td><td>748</td><td>110</td><td>509</td><td>113</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html">Face recognition in unconstrained videos with matched background similarity</a></td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Open University of Israel</td><td>32.77824165</td><td>34.99565673</td><td>50%</td><td>485</td><td>244</td><td>240</td><td>32</td><td>290</td><td>140</td></tr><tr><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td><td>sheffield</td><td>Sheffield Face</td><td><a href="papers/3607afdb204de9a5a9300ae98aa4635d9effcda2.html">Face Description with Local Binary Patterns: Application to Face Recognition</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>999</td><td>238</td><td>761</td><td>65</td><td>483</td><td>87</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a 
href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html">Learning Face Representation from Scratch</a></td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>53%</td><td>436</td><td>233</td><td>203</td><td>32</td><td>284</td><td>115</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>707</td><td>223</td><td>484</td><td>62</td><td>482</td><td>18</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html">Histograms of oriented gradients for human detection</a></td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>22%</td><td>999</td><td>217</td><td>782</td><td>67</td><td>520</td><td>22</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html">O M 4 . 1 The Subject Database 4 . 2 Experiment Plan 5 . 1 Varying the Overlap 4 Experimental Setup 5 Parameterisation Results</a></td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>999</td><td>214</td><td>785</td><td>96</td><td>550</td><td>57</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td><td>kdef</td><td>KDEF</td><td><a href="papers/93884e46c49f7ae1c7c34046fbc28882f2bd6341.html">Gaze fixation and the neural circuitry of face processing in autism</a></td><td><a href="{'url': 'http://doi.org/10.1038/nn1421', 'linkType': 'nature'}">[pdf]</a></td><td>Nature 
Neuroscience</td><td></td><td></td><td></td><td></td><td>31%</td><td>608</td><td>190</td><td>418</td><td>92</td><td>463</td><td>0</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk03</td><td>CUHK03</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>35%</td><td>512</td><td>180</td><td>332</td><td>29</td><td>323</td><td>4</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866', 'linkType': 'ieee'}">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>50%</td><td>356</td><td>179</td><td>177</td><td>21</td><td>299</td><td>3</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html">Interactive Facial Feature Localization</a></td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>52%</td><td>339</td><td>177</td><td>162</td><td>27</td><td>208</td><td>100</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html">Robust Face Landmark Estimation under Occlusion</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>55%</td><td>305</td><td>167</td><td>138</td><td>16</td><td>186</td><td>95</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><a 
href="https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>56%</td><td>285</td><td>159</td><td>125</td><td>28</td><td>188</td><td>82</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>584</td><td>159</td><td>425</td><td>38</td><td>336</td><td>9</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>imm_face</td><td>IMM Face Dataset</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>muct</td><td>MUCT</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html">Scalable Person Re-identification: A Benchmark</a></td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>38%</td><td>394</td><td>149</td><td>245</td><td>18</td><td>271</td><td>3</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html">Web-based database for facial expression analysis</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and 
Expo</td><td></td><td></td><td></td><td></td><td>32%</td><td>440</td><td>142</td><td>298</td><td>44</td><td>258</td><td>82</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td>41%</td><td>323</td><td>131</td><td>192</td><td>29</td><td>221</td><td>25</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html">A 3D facial expression database for facial behavior research</a></td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>24%</td><td>555</td><td>131</td><td>424</td><td>47</td><td>283</td><td>48</td></tr><tr><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td><td>coco</td><td>COCO</td><td><a href="papers/696ca58d93f6404fea0fc75c62d1d7b378f47628.html">Microsoft COCO Captions: Data Collection and Evaluation Server</a></td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>46%</td><td>283</td><td>129</td><td>154</td><td>16</td><td>231</td><td>4</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>498</td><td>127</td><td>371</td><td>55</td><td>319</td><td>32</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html">Presentation and validation of the Radboud Faces Database</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>28%</td><td>446</td><td>127</td><td>319</td><td>43</td><td>307</td><td>19</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces_news</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html">Names and faces in the news</a></td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.</td><td></td><td></td><td></td><td></td><td>41%</td><td>294</td><td>120</td><td>174</td><td>24</td><td>207</td><td>45</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="http://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf">[pdf]</a></td><td>EURASIP J. Image and Video Processing</td><td></td><td></td><td></td><td></td><td>20%</td><td>586</td><td>119</td><td>467</td><td>48</td><td>336</td><td>3</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>48%</td><td>222</td><td>107</td><td>115</td><td>21</td><td>158</td><td>48</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>48%</td><td>218</td><td>105</td><td>113</td><td>17</td><td>146</td><td>52</td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/28312c3a47c1be3a67365700744d3d6665b86f22.html">Face Recognition: A Literature Survey1</a></td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>105</td><td>265</td><td>32</td><td>217</td><td>39</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a 
href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>berkeley_pose</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html">Describing people: A poselet-based approach to attribute classification</a></td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>43%</td><td>221</td><td>96</td><td>125</td><td>14</td><td>160</td><td>23</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>10%</td><td>999</td><td>96</td><td>355</td><td>26</td><td>252</td><td>5</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html">Recognition using visual phrases</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>41%</td><td>233</td><td>95</td><td>138</td><td>18</td><td>174</td><td>5</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>352</td><td>94</td><td>258</td><td>26</td><td>195</td><td>3</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html">Pedestrian detection: A benchmark</a></td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>17%</td><td>519</td><td>89</td><td>430</td><td>27</td><td>286</td><td>2</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>32%</td><td>260</td><td>84</td><td>85</td><td>18</td><td>143</td><td>10</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html">Exploring Models and 
Data for Image Question Answering</a></td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>191</td><td>83</td><td>108</td><td>12</td><td>163</td><td>1</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>167</td><td>83</td><td>84</td><td>15</td><td>131</td><td>27</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html">Depth and Appearance for Mobile Scene Analysis</a></td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>25%</td><td>319</td><td>79</td><td>240</td><td>27</td><td>192</td><td>0</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html">WIDER FACE: A Face Detection Benchmark</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.06523.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>53%</td><td>148</td><td>78</td><td>70</td><td>16</td><td>107</td><td>34</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>51%</td><td>148</td><td>76</td><td>72</td><td>8</td><td>109</td><td>3</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>m2vts</td><td>m2vts</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a 
href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw_a</td><td>LFW-a</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>42%</td><td>177</td><td>75</td><td>102</td><td>15</td><td>102</td><td>54</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html">Age and Gender Estimation of Unfiltered Faces</a></td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td></td><td></td><td></td><td></td><td>43%</td><td>168</td><td>72</td><td>96</td><td>7</td><td>89</td><td>53</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html">Understanding images of groups of people</a></td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>36%</td><td>202</td><td>72</td><td>130</td><td>12</td><td>126</td><td>24</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>47%</td><td>150</td><td>71</td><td>79</td><td>4</td><td>111</td><td>8</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html">Learning effective human pose estimation from inaccurate annotation</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 
2011</td><td></td><td></td><td></td><td></td><td>40%</td><td>173</td><td>70</td><td>103</td><td>9</td><td>116</td><td>2</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html">Generic object recognition with boosting</a></td><td><a href="http://www.emt.tu-graz.ac.at/~pinz/onlinepapers/Reprint_Vol_28_No_3_2006.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>286</td><td>69</td><td>217</td><td>16</td><td>189</td><td>0</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk01</td><td>CUHK01</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>258</td><td>67</td><td>191</td><td>12</td><td>141</td><td>1</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>55%</td><td>121</td><td>66</td><td>55</td><td>11</td><td>98</td><td>20</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>towncenter</td><td>TownCenter</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html">Stable Multi-Target Tracking in Real-Time Surveillance Video (Preprint)</a></td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>310</td><td>64</td><td>246</td><td>24</td><td>177</td><td>4</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="https://doi.org/10.1007/s11263-016-0940-3">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>133</td><td>59</td><td>74</td><td>14</td><td>90</td><td>28</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>-35.27769990</td><td>149.11852700</td><td>33%</td><td>175</td><td>58</td><td>66</td><td>7</td><td>54</td><td>49</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html">Bosphorus Database for 3D Face Analysis</a></td><td><a 
href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>328</td><td>58</td><td>270</td><td>18</td><td>143</td><td>37</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html">Performance Measures and a Data Set for Multi-target, Multi-camera Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>136</td><td>58</td><td>78</td><td>6</td><td>107</td><td>0</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html">Performance Measures and a Data Set for Multi-target, Multi-camera Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>136</td><td>58</td><td>78</td><td>6</td><td>107</td><td>0</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462', 'linkType': 'ieee'}">[pdf]</a></td><td>Face and Gesture 2011</td><td></td><td></td><td></td><td></td><td>32%</td><td>184</td><td>58</td><td>126</td><td>23</td><td>112</td><td>23</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk02</td><td>CUHK02</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html">Locally Aligned Feature Transforms across Views</a></td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>24%</td><td>242</td><td>57</td><td>185</td><td>17</td><td>139</td><td>1</td></tr><tr><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2f5d44dc3e1b5955942133ff872ebd31716ec604.html">2D and 3D face recognition: A survey</a></td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>15%</td><td>389</td><td>57</td><td>332</td><td>28</td><td>198</td><td>17</td></tr><tr><td>a6e695ddd07aad719001c0fc1129328452385949</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/a6e695ddd07aad719001c0fc1129328452385949.html">The New Data and New Challenges in Multimedia Research</a></td><td><span class="gray">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>36%</td><td>160</td><td>57</td><td>103</td><td>11</td><td>105</td><td>4</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html">A data-driven approach to cleaning large face datasets</a></td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td>edu</td><td>University of Illinois, 
Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>46%</td><td>123</td><td>56</td><td>67</td><td>6</td><td>95</td><td>21</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>36%</td><td>151</td><td>54</td><td>97</td><td>7</td><td>85</td><td>26</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html">On a large sequence-based human gait database</a></td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>148</td><td>54</td><td>94</td><td>16</td><td>98</td><td>0</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html">Appearance-based gaze estimation in the wild</a></td><td><a href="https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Max Planck Institute for Informatics</td><td>49.25795660</td><td>7.04577417</td><td>38%</td><td>138</td><td>52</td><td>86</td><td>3</td><td>94</td><td>7</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="{'url': 'http://doi.org/10.1016/j.imavis.2016.01.002', 'linkType': 'doi'}">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>45%</td><td>114</td><td>51</td><td>63</td><td>10</td><td>70</td><td>31</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>146</td><td>49</td><td>97</td><td>6</td><td>96</td><td>0</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html">Person Re-identification by Video Ranking</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>25%</td><td>196</td><td>49</td><td>124</td><td>11</td><td>98</td><td>1</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf">[pdf]</a></td><td></td><td>edu</td><td>Tohoku 
University</td><td>38.25309450</td><td>140.87365930</td><td>53%</td><td>91</td><td>48</td><td>43</td><td>5</td><td>60</td><td>15</td></tr><tr><td>109df0e8e5969ddf01e073143e83599228a1163f</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/109df0e8e5969ddf01e073143e83599228a1163f.html">Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</a></td><td><a href="http://dl.acm.org/citation.cfm?id=2337184">[pdf]</a></td><td>2012 39th Annual International Symposium on Computer Architecture (ISCA)</td><td></td><td></td><td></td><td></td><td>25%</td><td>192</td><td>48</td><td>144</td><td>8</td><td>99</td><td>0</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>38%</td><td>125</td><td>48</td><td>77</td><td>6</td><td>72</td><td>1</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>41%</td><td>116</td><td>47</td><td>46</td><td>4</td><td>62</td><td>19</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390', 'linkType': 'ieee'}">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td>44%</td><td>102</td><td>45</td><td>54</td><td>6</td><td>62</td><td>28</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>38%</td><td>112</td><td>43</td><td>69</td><td>14</td><td>83</td><td>2</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/dd1d/51c3a59cb71cbfe1433ebeb4d973f7f9ddc1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>22%</td><td>193</td><td>42</td><td>133</td><td>15</td><td>101</td><td>2</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><a href="http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>University of Queensland</td><td>-27.49741805</td><td>153.01316956</td><td>30%</td><td>128</td><td>39</td><td>89</td><td>6</td><td>68</td><td>14</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html">Pedestrian Attribute Recognition At Far Distance</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>80</td><td>37</td><td>43</td><td>2</td><td>51</td><td>3</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html">Pruning training sets for learning of object categories</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308', 'linkType': 'ieee'}">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>58%</td><td>60</td><td>35</td><td>25</td><td>5</td><td>42</td><td>12</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html">Multi-camera activity correlation analysis</a></td><td><a 
href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0163.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>138</td><td>35</td><td>103</td><td>8</td><td>76</td><td>1</td></tr><tr><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/214c966d1f9c2a4b66f4535d9a0d4078e63a5867.html">Brainwash: A Data System for Feature Engineering</a></td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>57</td><td>34</td><td>23</td><td>2</td><td>50</td><td>0</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>77</td><td>34</td><td>43</td><td>4</td><td>57</td><td>1</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td>39%</td><td>83</td><td>32</td><td>51</td><td>2</td><td>62</td><td>3</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>72</td><td>32</td><td>40</td><td>7</td><td>54</td><td>8</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>112</td><td>31</td><td>81</td><td>12</td><td>76</td><td>4</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html">High Five: Recognising human interactions in TV shows</a></td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>91</td><td>31</td><td>60</td><td>11</td><td>64</td><td>1</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern 
Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>68</td><td>30</td><td>38</td><td>8</td><td>49</td><td>7</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>60%</td><td>50</td><td>30</td><td>19</td><td>2</td><td>40</td><td>4</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>60%</td><td>48</td><td>29</td><td>19</td><td>2</td><td>45</td><td>2</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html">Labeled Faces in the Wild: A Survey</a></td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>99</td><td>29</td><td>70</td><td>9</td><td>63</td><td>12</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>47%</td><td>60</td><td>28</td><td>32</td><td>0</td><td>36</td><td>6</td></tr><tr><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td><td>gavab_db</td><td>Gavab</td><td><a href="papers/42505464808dfb446f521fc6ff2cfeffd4d68ff1.html">Expression invariant 3D face recognition with a Morphable Model</a></td><td><a href="{'url': 
'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376', 'linkType': 'ieee'}">[pdf]</a></td><td>2008 8th IEEE International Conference on Automatic Face & Gesture Recognition</td><td></td><td></td><td></td><td></td><td>29%</td><td>94</td><td>27</td><td>67</td><td>10</td><td>57</td><td>5</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>18%</td><td>151</td><td>27</td><td>124</td><td>9</td><td>78</td><td>2</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.434">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>32%</td><td>85</td><td>27</td><td>58</td><td>9</td><td>51</td><td>0</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>73</td><td>26</td><td>47</td><td>6</td><td>39</td><td>16</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html">Recognize complex events from static images by fusing deep channels</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>58%</td><td>45</td><td>26</td><td>19</td><td>1</td><td>30</td><td>12</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html">Person Re-identification in the Wild</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Technology 
Sydney</td><td>-33.88096510</td><td>151.20107299</td><td>38%</td><td>65</td><td>25</td><td>40</td><td>1</td><td>46</td><td>0</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html">Automatic 3D face authentication</a></td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>25%</td><td>95</td><td>24</td><td>71</td><td>8</td><td>60</td><td>2</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td>edu</td><td>Jacobs University</td><td>53.41291480</td><td>-2.96897915</td><td>44%</td><td>52</td><td>23</td><td>29</td><td>5</td><td>37</td><td>4</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html">Object Detection Combining Recognition and Segmentation</a></td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>23%</td><td>101</td><td>23</td><td>78</td><td>11</td><td>58</td><td>0</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><a 
href="http://doi.acm.org/10.1145/2072572.2072590">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>122</td><td>22</td><td>100</td><td>11</td><td>71</td><td>1</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>24%</td><td>89</td><td>21</td><td>46</td><td>2</td><td>20</td><td>14</td></tr><tr><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/eb027969f9310e0ae941e2adee2d42cdf07d938c.html">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1710.08092.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>38%</td><td>56</td><td>21</td><td>35</td><td>6</td><td>50</td><td>3</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html">The MUG facial expression database</a></td><td><a href="http://ieeexplore.ieee.org/document/5617662/">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>40.62984145</td><td>22.95889350</td><td>28%</td><td>68</td><td>19</td><td>49</td><td>5</td><td>28</td><td>19</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>22%</td><td>88</td><td>19</td><td>43</td><td>4</td><td>42</td><td>2</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html">UMDFaces: An annotated face dataset for training deep networks</a></td><td><a href="http://arxiv.org/abs/1611.01484">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td>54%</td><td>35</td><td>19</td><td>16</td><td>5</td><td>28</td><td>6</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html">The intrinsic memorability of face photographs.</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Journal of experimental psychology. 
General</td><td></td><td></td><td></td><td></td><td>36%</td><td>47</td><td>17</td><td>30</td><td>3</td><td>33</td><td>1</td></tr><tr><td>69a68f9cf874c69e2232f47808016c2736b90c35</td><td>celeba_plus</td><td>CelebFaces+</td><td><a href="papers/69a68f9cf874c69e2232f47808016c2736b90c35.html">Learning Deep Representation for Imbalanced Classification</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>33%</td><td>51</td><td>17</td><td>34</td><td>1</td><td>39</td><td>2</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>39%</td><td>44</td><td>17</td><td>27</td><td>0</td><td>36</td><td>0</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html">Violent flows: Real-time detection of violent crowd behavior</a></td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>20%</td><td>83</td><td>17</td><td>66</td><td>6</td><td>42</td><td>2</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>CAFE</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>33</td><td>16</td><td>17</td><td>3</td><td>28</td><td>1</td></tr><tr><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/c34532fe6bfbd1e6df477c9ffdbb043b77e7804d.html">A 3D Morphable Eye Region Model for Gaze Estimation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>67%</td><td>24</td><td>16</td><td>8</td><td>0</td><td>18</td><td>4</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html">The CMU Face In Action (FIA) Database</a></td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>55</td><td>16</td><td>39</td><td>5</td><td>38</td><td>7</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a 
href="https://doi.org/10.1007/s11263-013-0672-6">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>1</td><td>18</td><td>1</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>20</td><td>0</td></tr><tr><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/a5a44a32a91474f00a3cda671a802e87c899fbb4.html">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1801.03150.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>25</td><td>0</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html">RONCHI AND PERONA: DESCRIBING COMMON HUMAN VISUAL ACTIONS IN IMAGES 1 Describing Common Human Visual Actions in Images</a></td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>26</td><td>14</td><td>12</td><td>0</td><td>25</td><td>0</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html">End-to-End Deep Learning for Person Search</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf', 'linkType': 's2'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>34%</td><td>41</td><td>14</td><td>27</td><td>2</td><td>27</td><td>0</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html">Personalizing Human Video Pose Estimation</a></td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>32</td><td>14</td><td>18</td><td>2</td><td>27</td><td>0</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a 
href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html">UMB-DB: A database of partially occluded 3D faces</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>29%</td><td>45</td><td>13</td><td>32</td><td>2</td><td>20</td><td>3</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>31%</td><td>39</td><td>12</td><td>27</td><td>2</td><td>27</td><td>7</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td></td><td></td><td></td><td></td><td>16%</td><td>75</td><td>12</td><td>63</td><td>6</td><td>25</td><td>8</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html">Fine-grained evaluation on face detection in the wild</a></td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Chinese Academy of Sciences</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>13</td><td>4</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>16%</td><td>77</td><td>12</td><td>39</td><td>3</td><td>32</td><td>0</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html">Consistent Re-identification in a Camera Network</a></td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>45</td><td>12</td><td>33</td><td>7</td><td>34</td><td>1</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf">[pdf]</a></td><td>2012 International Conference on 
Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td>21%</td><td>58</td><td>12</td><td>46</td><td>7</td><td>40</td><td>1</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>67%</td><td>18</td><td>12</td><td>6</td><td>0</td><td>16</td><td>0</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1705.00393.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>41%</td><td>27</td><td>11</td><td>16</td><td>2</td><td>22</td><td>4</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>0</td><td>18</td><td>0</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html">Texas 3D Face Recognition Database</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ssiai_may10.pdf">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td>18%</td><td>61</td><td>11</td><td>50</td><td>3</td><td>36</td><td>2</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>25%</td><td>40</td><td>10</td><td>30</td><td>0</td><td>20</td><td>6</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html">Training deep networks for facial 
expression recognition with crowd-sourced label distribution</a></td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>15</td><td>3</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>11</td><td>2</td></tr><tr><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td><td>agedb</td><td>AgeDB</td><td><a href="papers/6dcf418c778f528b5792104760f1fbfe90c6dd6a.html">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>82%</td><td>11</td><td>9</td><td>2</td><td>0</td><td>10</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html">Fashion Landmark Detection in the Wild</a></td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>39%</td><td>23</td><td>9</td><td>8</td><td>0</td><td>15</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html">Automated Human Identification Using Ear Imaging</a></td><td><a href="{'url': 
'https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>13%</td><td>70</td><td>9</td><td>61</td><td>6</td><td>28</td><td>1</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>10</td><td>4</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html">Kinship Verification through Transfer Learning</a></td><td><a href="http://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>14%</td><td>66</td><td>9</td><td>39</td><td>2</td><td>18</td><td>5</td></tr><tr><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td><td>nudedetection</td><td>Nude Detection</td><td><a href="papers/7ace44190729927e5cb0dd5d363fcae966fe13f7.html">A bag-of-features approach based on Hue-SIFT descriptor for nude detection</a></td><td><a href="http://ieeexplore.ieee.org/document/7077625/">[pdf]</a></td><td>2009 17th European Signal Processing Conference</td><td></td><td></td><td></td><td></td><td>18%</td><td>51</td><td>9</td><td>42</td><td>1</td><td>18</td><td>0</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>20</td><td>8</td><td>12</td><td>2</td><td>16</td><td>0</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928', 'linkType': 'ieee'}">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td></td><td></td><td></td><td></td><td>25%</td><td>32</td><td>8</td><td>24</td><td>3</td><td>16</td><td>1</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>47%</td><td>17</td><td>8</td><td>9</td><td>1</td><td>12</td><td>1</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a 
href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>47%</td><td>15</td><td>7</td><td>8</td><td>1</td><td>10</td><td>3</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html">Ear recognition: More than a survey</a></td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td>29%</td><td>24</td><td>7</td><td>17</td><td>0</td><td>11</td><td>0</td></tr><tr><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td><td>camel</td><td>CAMEL</td><td><a href="papers/5801690199c1917fa58c35c3dead177c0b8f9f2d.html">Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf">[pdf]</a></td><td>Remote Sensing</td><td></td><td></td><td></td><td></td><td>37%</td><td>19</td><td>7</td><td>12</td><td>1</td><td>16</td><td>0</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td></td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td>22%</td><td>32</td><td>7</td><td>25</td><td>2</td><td>17</td><td>0</td></tr><tr><td>ec792ad2433b6579f2566c932ee414111e194537</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/ec792ad2433b6579f2566c932ee414111e194537.html">Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1711.08565.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>50%</td><td>14</td><td>7</td><td>7</td><td>1</td><td>11</td><td>0</td></tr><tr><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td><td>pornodb</td><td>Pornography DB</td><td><a href="papers/b92a1ed9622b8268ae3ac9090e25789fc41cc9b8.html">Pooling in image representation: The visual codeword point of view</a></td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>9%</td><td>77</td><td>7</td><td>70</td><td>7</td><td>43</td><td>2</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html">Three-dimensional face recognition: an eigensurface approach</a></td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. 
ICIP '04.</td><td></td><td></td><td></td><td></td><td>19%</td><td>36</td><td>7</td><td>29</td><td>4</td><td>25</td><td>1</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>expw</td><td>ExpW</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html">Recognizing disguised faces</a></td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>24</td><td>6</td><td>18</td><td>0</td><td>16</td><td>1</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html">IARPA Janus Benchmark-B Face Dataset</a></td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>24%</td><td>25</td><td>6</td><td>19</td><td>6</td><td>21</td><td>3</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.02459.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td>edu</td><td>South China University of Technology</td><td>23.05020420</td><td>113.39880323</td><td>43%</td><td>14</td><td>6</td><td>8</td><td>3</td><td>5</td><td>7</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>24%</td><td>21</td><td>5</td><td>16</td><td>3</td><td>11</td><td>1</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><a 
href="https://doi.org/10.1109/TIFS.2014.2361479">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>34.22498270</td><td>-77.86907744</td><td>38%</td><td>13</td><td>5</td><td>3</td><td>0</td><td>3</td><td>3</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><a href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>45.50397610</td><td>-73.57496870</td><td>28%</td><td>18</td><td>5</td><td>6</td><td>0</td><td>7</td><td>4</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html">Re-identify people in wide area camera network</a></td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>9%</td><td>55</td><td>5</td><td>50</td><td>2</td><td>35</td><td>0</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1605.09653.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE transactions on pattern analysis and machine intelligence</td><td></td><td></td><td></td><td></td><td>27%</td><td>15</td><td>4</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td><td>buhmap_db</td><td>BUHMAP-DB </td><td><a href="papers/014b8df0180f33b9fea98f34ae611c6447d761d2.html">Facial feature tracking and expression recognition for sign language</a></td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf">[pdf]</a></td><td>2008 23rd International Symposium on Computer and Information Sciences</td><td></td><td></td><td></td><td></td><td>16%</td><td>25</td><td>4</td><td>21</td><td>1</td><td>10</td><td>2</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html">Adaptive Image Sampling and Windows Classification for On–board Pedestrian Detection</a></td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>44</td><td>4</td><td>40</td><td>1</td><td>21</td><td>0</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html">Iranian Face Database with age, pose and 
expression</a></td><td><span class="gray">[pdf]</a></td><td>2007 International Conference on Machine Vision</td><td></td><td></td><td></td><td></td><td>20%</td><td>20</td><td>4</td><td>16</td><td>2</td><td>11</td><td>3</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html">Genealogical face recognition based on UB KinFace database</a></td><td><a href="https://doi.org/10.1109/CVPRW.2011.5981801">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>42.93362780</td><td>-78.88394479</td><td>13%</td><td>30</td><td>4</td><td>26</td><td>1</td><td>9</td><td>5</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>14%</td><td>29</td><td>4</td><td>25</td><td>3</td><td>17</td><td>0</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><a href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Michigan State University</td><td>42.71856800</td><td>-84.47791571</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>5</td><td>0</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html">Faces in Places: compound query retrieval</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>0</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html">ChaLearn looking at people: A review of events and resources</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1701.02664.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td>30%</td><td>10</td><td>3</td><td>7</td><td>1</td><td>6</td><td>0</td></tr><tr><td>a5acda0e8c0937bfed013e6382da127103e41395</td><td>disfa</td><td>DISFA</td><td><a href="papers/a5acda0e8c0937bfed013e6382da127103e41395.html">Extended DISFA Dataset: Investigating Posed and Spontaneous Facial 
Expressions</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>38%</td><td>8</td><td>3</td><td>5</td><td>1</td><td>5</td><td>0</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html">Is the eye region more reliable than the face? A preliminary study of face-based recognition on a transgender dataset</a></td><td><a href="https://doi.org/10.1109/BTAS.2013.6712710">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of North Carolina Wilmington</td><td>34.23755810</td><td>-77.92701290</td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>2</td><td>3</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217', 'linkType': 'ieee'}">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td>33%</td><td>9</td><td>3</td><td>6</td><td>2</td><td>9</td><td>0</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html">Large scale unconstrained open set face database</a></td><td><a href="http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of Colorado at Colorado Springs</td><td>38.89646790</td><td>-104.80505940</td><td>60%</td><td>5</td><td>3</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html">USED: a large-scale social event detection dataset</a></td><td><a href="http://doi.acm.org/10.1145/2910017.2910624">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html">VADANA: A dense dataset for facial image analysis</a></td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>19%</td><td>16</td><td>3</td><td>13</td><td>0</td><td>6</td><td>6</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf">[pdf]</a></td><td></td><td>edu</td><td>California Institute of 
Technology</td><td>34.13710185</td><td>-118.12527487</td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>6</td><td>1</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html">Face swapping: automatically replacing faces in photographs</a></td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td>ACM Trans. Graph.</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html">Visual Kinship Recognition of Families in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>67%</td><td>3</td><td>2</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/6dbe8e5121c534339d6e41f8683e85f87e6abf81.html">Clothing Cosegmentation for Shopping Images With Cluttered Background</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>33%</td><td>6</td><td>2</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="http://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>12%</td><td>17</td><td>2</td><td>15</td><td>2</td><td>11</td><td>0</td></tr><tr><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td><td>ilids_vid_reid</td><td>iLIDS-VID</td><td><a href="papers/99eb4cea0d9bc9fe777a5c5172f8638a37a7f262.html">Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</a></td><td><a href="https://doi.org/10.1109/LSP.2016.2574323">[pdf]</a></td><td>IEEE Signal Processing Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>4</td><td>0</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html">Large Age-Gap face verification by feature injection in deep networks</a></td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>2</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market1203</td><td>Market 1203</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a 
href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html">Investigating Open-World Person Re-identi cation Using a Drone</a></td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>3</td><td>0</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html">How to Take a Good Selfie?</a></td><td><a href="http://doi.acm.org/10.1145/2733373.2806365">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>czech_news_agency</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>10%</td><td>10</td><td>1</td><td>9</td><td>0</td><td>4</td><td>2</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html">A Multi-modal Graphical Model for Scene Analysis</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td>12%</td><td>8</td><td>1</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td><td>face_research_lab</td><td>Face Research Lab London</td><td><a href="papers/c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8.html">Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</a></td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td>Scientific reports</td><td>edu</td><td>University College London</td><td>51.52316070</td><td>-0.12820370</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><a href="http://doi.acm.org/10.1145/2676440.2676443">[pdf]</a></td><td></td><td>edu</td><td>University of 
Kentucky</td><td>38.03337420</td><td>-84.50177580</td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html">Indian Face Age Database : A Database for Face Recognition with Age Variation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>e3e44385a71a52fd483c58eb3cdf8d03960c0b70</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/e3e44385a71a52fd483c58eb3cdf8d03960c0b70.html">A Hierarchical Graphical Model for Recognizing Human Actions and Interactions in Video</a></td><td><a href="http://pdfs.semanticscholar.org/e3e4/4385a71a52fd483c58eb3cdf8d03960c0b70.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td><td>megaage</td><td>MegaAge</td><td><a href="papers/d80a3d1f3a438e02a6685e66ee908446766fefa9.html">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1708.09687.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html">Spoofing faces using makeup: An investigative study</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>43.61581310</td><td>7.06838000</td><td>20%</td><td>5</td><td>1</td><td>4</td><td>0</td><td>1</td><td>2</td></tr><tr><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/fb82681ac5d3487bd8e52dbb3d1fa220eeac855e.html">1 Network Notebook</a></td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>11</td><td>1</td><td>10</td><td>1</td><td>10</td><td>0</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1703.06283.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>8%</td><td>12</td><td>1</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html">ReSEED: social event dEtection dataset</a></td><td><a 
href="https://pub.uni-bielefeld.de/download/2663466/2686734">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>17%</td><td>6</td><td>1</td><td>5</td><td>1</td><td>1</td><td>1</td></tr><tr><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709.html">Learning to predict human behaviour in crowded scenes</a></td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>20%</td><td>5</td><td>1</td><td>4</td><td>1</td><td>5</td><td>0</td></tr><tr><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/9696ad8b164f5e10fcfe23aacf74bd6168aebb15.html">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1712.01443.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/f152b6ee251cca940dd853c54e6a7b78fbc6b235.html">Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</a></td><td><a href="{'url': 'http://dl.acm.org/citation.cfm?id=3232665', 'linkType': 'acm'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td><td>bjut_3d</td><td>BJUT-3D</td><td><a href="papers/1ed1a49534ad8dd00f81939449f6389cfbc25321.html">A novel face recognition method based on 3D face model</a></td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202">[pdf]</a></td><td>2007 IEEE International Conference on Robotics and Biomimetics (ROBIO)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>1</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4.html">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1805.07193.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html">FDDB: A Benchmark for Face Detection in Unconstrained Settings</a></td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of 
Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td><td>fei</td><td>FEI</td><td><a href="papers/b6b1b0632eb9d4ab1427278f5e5c46f97753c73d.html">Generalização cartográfica automatizada para um banco de dados cadastral</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html">A Multi - camera video data set for research on High - Definition surveillance</a></td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd.html">High-resolution comprehensive 3-D dynamic database for facial articulation analysis</a></td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>5</td><td>0</td><td>5</td><td>0</td><td>1</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/ad62c6e17bc39b4dec20d32f6ac667ae42d2c118.html">A Synchronization Ground Truth for the Jiku Mobile Video Dataset</a></td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html">Component-Based Face Recognition with 3D Morphable Models</a></td><td><a href="http://www.bheisele.com/avbpa2003.pdf">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition 
Workshop</td><td></td><td></td><td></td><td></td><td>0%</td><td>12</td><td>0</td><td>12</td><td>0</td><td>8</td><td>0</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html">Crowdsourcing facial expressions for affective-interaction</a></td><td><a href="https://doi.org/10.1016/j.cviu.2016.02.001">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html">Competitive affective gaming: winning with a smile</a></td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html">PETS 2017: Dataset and Challenge</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>8</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku</td><td>PKU</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>1</td><td>0</td></tr><tr><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/c866a2afc871910e3282fd9498dce4ab20f6a332.html">Surveillance Face Recognition Challenge</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.09691.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td><td>scface</td><td>SCface</td><td><a href="papers/f3b84a03985de3890b400b68e2a92c0a00afd9d0.html">Large Variability Surveillance Camera Face Database</a></td><td><span class="gray">[pdf]</a></td><td>2015 Seventh International Conference on Computational Intelligence, Modelling and Simulation (CIMSim)</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.04326.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>377f2b65e6a9300448bdccf678cde59449ecd337</td><td>ufdd</td><td>UFDD</td><td><a href="papers/377f2b65e6a9300448bdccf678cde59449ecd337.html">Pushing the Limits of Unconstrained Face Detection: a 
Challenge Dataset and Baseline Results</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.10275.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Johns Hopkins University</td><td>39.32905300</td><td>-76.61942500</td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td>umd_faces</td><td>UMD</td><td><a href="papers/447d8893a4bdc29fa1214e53499ffe67b28a6db5.html">Electronic Transport in Quantum Confined Systems</a></td><td><a href="http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html">Re-identification of pedestrians with variable occlusion and scale</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>10</td><td>0</td><td>10</td><td>2</td><td>6</td><td>0</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><a href="http://doi.acm.org/10.1145/2996913.2996997">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>77c81c13a110a341c140995bedb98101b9e84f7f</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/77c81c13a110a341c140995bedb98101b9e84f7f.html">WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td></td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html">WLFDB: Weakly Labeled Face Databases</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html">YawDD: a yawning detection dataset</a></td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>14</td><td>0</td><td>14</td><td>1</td><td>2</td><td>1</td></tr></table></body></html> \ No newline at end of file
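The report rows above and the "Coverage" table that follows are generated output. For reference, below is a minimal sketch (not the repository's actual s2-citation-report.py) of how one such row could be assembled from a per-paper citation record; the field names ("paper_id", "geocoded", "with_pdf", ...) are assumptions for illustration. In the data shown, the Coverage percentage appears to be geocoded citations over total citations, with zero-citation rows reported as 100%, and the sketch also unwraps the Semantic Scholar PDF-link dicts that otherwise appear verbatim inside href attributes.

    # Hedged sketch: build one <tr> of the coverage table. Assumed record layout.
    import html

    HEADERS = [
        "Paper ID", "Megapixels Key", "Megapixels Name", "Report Link", "PDF Link",
        "Journal", "Type", "Address", "Lat", "Lng", "Coverage", "Total Citations",
        "Geocoded Citations", "Unknown Citations", "Empty Citations",
        "With PDF", "With DOI",
    ]

    def pdf_href(pdf_field):
        # S2 sometimes returns a dict like {'url': ..., 'linkType': ...} instead of
        # a plain URL; unwrap it so the raw dict repr never lands inside href="".
        if isinstance(pdf_field, dict):
            return pdf_field.get("url", "")
        return pdf_field or ""

    def coverage_row(rec):
        total = rec.get("total", 0)
        geocoded = rec.get("geocoded", 0)
        # Zero-citation papers are shown as 100% covered in the generated table.
        pct = 100 if total == 0 else round(100 * geocoded / total)
        cells = [
            rec["paper_id"], rec["key"], rec["name"],
            '<a href="papers/{}.html">{}</a>'.format(rec["paper_id"], html.escape(rec["title"])),
            '<a href="{}">[pdf]</a>'.format(html.escape(pdf_href(rec.get("pdf")), quote=True)),
            rec.get("journal", ""), rec.get("type", ""), rec.get("address", ""),
            rec.get("lat", ""), rec.get("lng", ""),
            "{}%".format(pct), total, geocoded,
            rec.get("unknown", 0), rec.get("empty", 0),
            rec.get("with_pdf", 0), rec.get("with_doi", 0),
        ]
        return "<tr>" + "".join("<td>{}</td>".format(c) for c in cells) + "</tr>"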
+<!doctype html><html><head><meta charset='utf-8'><title>Coverage</title><link rel='stylesheet' href='reports.css'></head><body><h2>Coverage</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1212.0402.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>University of Central Florida</td><td>28.59899755</td><td>-81.19712501</td><td>54%</td><td>999</td><td>535</td><td>464</td><td>73</td><td>708</td><td>212</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html">Face detection, pose estimation, and landmark localization in the wild</a></td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of California, Irvine</td><td>33.64319010</td><td>-117.84016494</td><td>52%</td><td>999</td><td>521</td><td>478</td><td>59</td><td>607</td><td>273</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>put_face</td><td>Put Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a 
href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html">Attribute and simile classifiers for face verification</a></td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>51%</td><td>894</td><td>454</td><td>440</td><td>55</td><td>587</td><td>222</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>42%</td><td>999</td><td>422</td><td>577</td><td>80</td><td>538</td><td>295</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>42%</td><td>999</td><td>422</td><td>577</td><td>80</td><td>538</td><td>295</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td></td><td></td><td></td><td></td><td>41%</td><td>975</td><td>403</td><td>572</td><td>65</td><td>460</td><td>345</td></tr><tr><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td><td>a_pascal_yahoo</td><td>aPascal</td><td><a href="papers/2e384f057211426ac5922f1b33d2aa8df5d51f57.html">Describing objects by their attributes</a></td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>727</td><td>73</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html">Deep Face Recognition</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>621</td><td>156</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a 
href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf">[pdf]</a></td><td></td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>38%</td><td>999</td><td>380</td><td>619</td><td>75</td><td>555</td><td>252</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html">VQA: Visual Question Answering</a></td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>47%</td><td>731</td><td>344</td><td>387</td><td>47</td><td>628</td><td>4</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html">Deep Learning Face Attributes in the Wild</a></td><td><a href="http://arxiv.org/pdf/1411.7766v2.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>42%</td><td>808</td><td>340</td><td>468</td><td>69</td><td>666</td><td>50</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html">International Conference on Automatic Face and Gesture Recognition The CMU Pose , Illumination , and Expression ( PIE ) Database</a></td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>742</td><td>330</td><td>412</td><td>61</td><td>410</td><td>232</td></tr><tr><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td><td>voc</td><td>VOC</td><td><a href="papers/abe9f3b91fd26fa1b50cd685c0d20debfb372f73.html">The Pascal Visual Object Classes Challenge: A Retrospective</a></td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>999</td><td>315</td><td>684</td><td>75</td><td>698</td><td>6</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf">[pdf]</a></td><td></td><td>edu</td><td>Kyushu University</td><td>33.59914655</td><td>130.22359848</td><td>36%</td><td>848</td><td>308</td><td>540</td><td>56</td><td>413</td><td>255</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html">Nonparametric Object and Scene Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>999</td><td>305</td><td>694</td><td>93</td><td>670</td><td>9</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfw_p</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><a 
href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>53%</td><td>521</td><td>274</td><td>247</td><td>40</td><td>321</td><td>144</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html">Overview of the face recognition grand challenge</a></td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>253</td><td>746</td><td>110</td><td>572</td><td>64</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html">Face recognition in unconstrained videos with matched background similarity</a></td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Open University of Israel</td><td>32.77824165</td><td>34.99565673</td><td>50%</td><td>485</td><td>244</td><td>240</td><td>32</td><td>290</td><td>140</td></tr><tr><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td><td>sheffield</td><td>Sheffield Face</td><td><a href="papers/3607afdb204de9a5a9300ae98aa4635d9effcda2.html">Face Description with Local Binary Patterns: Application to Face Recognition</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>999</td><td>238</td><td>761</td><td>65</td><td>483</td><td>87</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html">Learning Face Representation from Scratch</a></td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>53%</td><td>436</td><td>233</td><td>203</td><td>32</td><td>284</td><td>115</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html">Poselets: Body part detectors 
trained using 3D human pose annotations</a></td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>707</td><td>223</td><td>484</td><td>62</td><td>482</td><td>18</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html">Histograms of oriented gradients for human detection</a></td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>22%</td><td>999</td><td>217</td><td>782</td><td>67</td><td>520</td><td>22</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html">O M 4 . 1 The Subject Database 4 . 2 Experiment Plan 5 . 1 Varying the Overlap 4 Experimental Setup 5 Parameterisation Results</a></td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>999</td><td>214</td><td>785</td><td>96</td><td>550</td><td>57</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td><td>kdef</td><td>KDEF</td><td><a href="papers/93884e46c49f7ae1c7c34046fbc28882f2bd6341.html">Gaze fixation and the neural circuitry of face processing in autism</a></td><td><a href="{'url': 'http://doi.org/10.1038/nn1421', 'linkType': 'nature'}">[pdf]</a></td><td>Nature Neuroscience</td><td></td><td></td><td></td><td></td><td>31%</td><td>608</td><td>190</td><td>418</td><td>92</td><td>463</td><td>0</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk03</td><td>CUHK03</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td></td><td></td><td></td><td></td><td>35%</td><td>512</td><td>180</td><td>332</td><td>29</td><td>323</td><td>4</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866', 'linkType': 'ieee'}">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>50%</td><td>356</td><td>179</td><td>177</td><td>21</td><td>299</td><td>3</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html">Interactive Facial Feature Localization</a></td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>52%</td><td>339</td><td>177</td><td>162</td><td>27</td><td>208</td><td>100</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html">Robust Face Landmark Estimation under Occlusion</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>55%</td><td>305</td><td>167</td><td>138</td><td>16</td><td>186</td><td>95</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>584</td><td>159</td><td>425</td><td>38</td><td>336</td><td>9</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>imm_face</td><td>IMM Face Dataset</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>muct</td><td>MUCT</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark 
localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html">Scalable Person Re-identification: A Benchmark</a></td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>38%</td><td>394</td><td>149</td><td>245</td><td>18</td><td>271</td><td>3</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html">Web-based database for facial expression analysis</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td>32%</td><td>440</td><td>142</td><td>298</td><td>44</td><td>258</td><td>82</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td>41%</td><td>323</td><td>131</td><td>192</td><td>29</td><td>221</td><td>25</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html">A 3D facial expression database for facial behavior research</a></td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>24%</td><td>555</td><td>131</td><td>424</td><td>47</td><td>283</td><td>48</td></tr><tr><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td><td>coco</td><td>COCO</td><td><a href="papers/696ca58d93f6404fea0fc75c62d1d7b378f47628.html">Microsoft COCO Captions: Data Collection and Evaluation Server</a></td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>46%</td><td>283</td><td>129</td><td>154</td><td>16</td><td>231</td><td>4</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>498</td><td>127</td><td>371</td><td>55</td><td>319</td><td>32</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a 
href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html">Presentation and validation of the Radboud Faces Database</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>28%</td><td>446</td><td>127</td><td>319</td><td>43</td><td>307</td><td>19</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces_news</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html">Names and faces in the news</a></td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.</td><td></td><td></td><td></td><td></td><td>41%</td><td>294</td><td>120</td><td>174</td><td>24</td><td>207</td><td>45</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and 
people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. 
Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>48%</td><td>222</td><td>107</td><td>115</td><td>21</td><td>158</td><td>48</td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/28312c3a47c1be3a67365700744d3d6665b86f22.html">Face Recognition: A Literature Survey1</a></td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>105</td><td>265</td><td>32</td><td>217</td><td>39</td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/28312c3a47c1be3a67365700744d3d6665b86f22.html">Face Recognition: A Literature Survey1</a></td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>105</td><td>265</td><td>32</td><td>217</td><td>39</td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/28312c3a47c1be3a67365700744d3d6665b86f22.html">Face Recognition: A Literature Survey1</a></td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>105</td><td>265</td><td>32</td><td>217</td><td>39</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on 
Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>berkeley_pose</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html">Describing people: A poselet-based approach to attribute classification</a></td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>43%</td><td>221</td><td>96</td><td>125</td><td>14</td><td>160</td><td>23</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>10%</td><td>999</td><td>96</td><td>355</td><td>26</td><td>252</td><td>5</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>10%</td><td>999</td><td>96</td><td>355</td><td>26</td><td>252</td><td>5</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html">Recognition using visual phrases</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>41%</td><td>233</td><td>95</td><td>138</td><td>18</td><td>174</td><td>5</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>352</td><td>94</td><td>258</td><td>26</td><td>195</td><td>3</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>32%</td><td>260</td><td>84</td><td>85</td><td>18</td><td>143</td><td>10</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html">SUN attribute database: Discovering, annotating, and recognizing scene 
attributes</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>32%</td><td>260</td><td>84</td><td>85</td><td>18</td><td>143</td><td>10</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html">Exploring Models and Data for Image Question Answering</a></td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>191</td><td>83</td><td>108</td><td>12</td><td>163</td><td>1</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>167</td><td>83</td><td>84</td><td>15</td><td>131</td><td>27</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html">Depth and Appearance for Mobile Scene Analysis</a></td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>25%</td><td>319</td><td>79</td><td>240</td><td>27</td><td>192</td><td>0</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html">WIDER FACE: A Face Detection Benchmark</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.06523.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>53%</td><td>148</td><td>78</td><td>70</td><td>16</td><td>107</td><td>34</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>51%</td><td>148</td><td>76</td><td>72</td><td>8</td><td>109</td><td>3</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>m2vts</td><td>m2vts</td><td><a 
href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw_a</td><td>LFW-a</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>42%</td><td>177</td><td>75</td><td>102</td><td>15</td><td>102</td><td>54</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html">Age and Gender Estimation of Unfiltered Faces</a></td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td></td><td></td><td></td><td></td><td>43%</td><td>168</td><td>72</td><td>96</td><td>7</td><td>89</td><td>53</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html">Understanding images of groups of people</a></td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>36%</td><td>202</td><td>72</td><td>130</td><td>12</td><td>126</td><td>24</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html">Learning effective human pose estimation from inaccurate annotation</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 
2011</td><td></td><td></td><td></td><td></td><td>40%</td><td>173</td><td>70</td><td>103</td><td>9</td><td>116</td><td>2</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk01</td><td>CUHK01</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>258</td><td>67</td><td>191</td><td>12</td><td>141</td><td>1</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>towncenter</td><td>TownCenter</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html">Stable Multi-Target Tracking in Real-Time Surveillance Video (Preprint)</a></td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>310</td><td>64</td><td>246</td><td>24</td><td>177</td><td>4</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>-35.27769990</td><td>149.11852700</td><td>33%</td><td>175</td><td>58</td><td>66</td><td>7</td><td>54</td><td>49</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>-35.27769990</td><td>149.11852700</td><td>33%</td><td>175</td><td>58</td><td>66</td><td>7</td><td>54</td><td>49</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html">Bosphorus Database for 3D Face Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>328</td><td>58</td><td>270</td><td>18</td><td>143</td><td>37</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html">Performance Measures and a Data Set for Multi-target, Multi-camera Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>136</td><td>58</td><td>78</td><td>6</td><td>107</td><td>0</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462', 'linkType': 'ieee'}">[pdf]</a></td><td>Face and Gesture 
2011</td><td></td><td></td><td></td><td></td><td>32%</td><td>184</td><td>58</td><td>126</td><td>23</td><td>112</td><td>23</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk02</td><td>CUHK02</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html">Locally Aligned Feature Transforms across Views</a></td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>24%</td><td>242</td><td>57</td><td>185</td><td>17</td><td>139</td><td>1</td></tr><tr><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2f5d44dc3e1b5955942133ff872ebd31716ec604.html">2D and 3D face recognition: A survey</a></td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>15%</td><td>389</td><td>57</td><td>332</td><td>28</td><td>198</td><td>17</td></tr><tr><td>a6e695ddd07aad719001c0fc1129328452385949</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/a6e695ddd07aad719001c0fc1129328452385949.html">The New Data and New Challenges in Multimedia Research</a></td><td><span class="gray">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>36%</td><td>160</td><td>57</td><td>103</td><td>11</td><td>105</td><td>4</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html">A data-driven approach to cleaning large face datasets</a></td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>46%</td><td>123</td><td>56</td><td>67</td><td>6</td><td>95</td><td>21</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>36%</td><td>151</td><td>54</td><td>97</td><td>7</td><td>85</td><td>26</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html">On a large sequence-based human gait database</a></td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>148</td><td>54</td><td>94</td><td>16</td><td>98</td><td>0</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html">Appearance-based gaze estimation in the wild</a></td><td><a href="https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Max Planck Institute for 
Informatics</td><td>49.25795660</td><td>7.04577417</td><td>38%</td><td>138</td><td>52</td><td>86</td><td>3</td><td>94</td><td>7</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>146</td><td>49</td><td>97</td><td>6</td><td>96</td><td>0</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html">Person Re-identification by Video Ranking</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>25%</td><td>196</td><td>49</td><td>124</td><td>11</td><td>98</td><td>1</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html">Person Re-identification by Video Ranking</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>25%</td><td>196</td><td>49</td><td>124</td><td>11</td><td>98</td><td>1</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html">Person Re-identification by Video Ranking</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>25%</td><td>196</td><td>49</td><td>124</td><td>11</td><td>98</td><td>1</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf">[pdf]</a></td><td></td><td>edu</td><td>Tohoku University</td><td>38.25309450</td><td>140.87365930</td><td>53%</td><td>91</td><td>48</td><td>43</td><td>5</td><td>60</td><td>15</td></tr><tr><td>109df0e8e5969ddf01e073143e83599228a1163f</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/109df0e8e5969ddf01e073143e83599228a1163f.html">Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</a></td><td><a href="http://dl.acm.org/citation.cfm?id=2337184">[pdf]</a></td><td>2012 39th Annual International Symposium on Computer Architecture (ISCA)</td><td></td><td></td><td></td><td></td><td>25%</td><td>192</td><td>48</td><td>144</td><td>8</td><td>99</td><td>0</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern 
Recognition</td><td></td><td></td><td></td><td></td><td>38%</td><td>125</td><td>48</td><td>77</td><td>6</td><td>72</td><td>1</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>41%</td><td>116</td><td>47</td><td>46</td><td>4</td><td>62</td><td>19</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>41%</td><td>116</td><td>47</td><td>46</td><td>4</td><td>62</td><td>19</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>41%</td><td>116</td><td>47</td><td>46</td><td>4</td><td>62</td><td>19</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390', 'linkType': 'ieee'}">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td>44%</td><td>102</td><td>45</td><td>54</td><td>6</td><td>62</td><td>28</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390', 'linkType': 'ieee'}">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td>44%</td><td>102</td><td>45</td><td>54</td><td>6</td><td>62</td><td>28</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 
IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><a href="http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>University of Queensland</td><td>-27.49741805</td><td>153.01316956</td><td>30%</td><td>128</td><td>39</td><td>89</td><td>6</td><td>68</td><td>14</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html">Pedestrian Attribute Recognition At Far Distance</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>80</td><td>37</td><td>43</td><td>2</td><td>51</td><td>3</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html">Pruning training sets for learning of object categories</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308', 'linkType': 'ieee'}">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>58%</td><td>60</td><td>35</td><td>25</td><td>5</td><td>42</td><td>12</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 
'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/214c966d1f9c2a4b66f4535d9a0d4078e63a5867.html">Brainwash: A Data System for Feature Engineering</a></td><td><a href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>57</td><td>34</td><td>23</td><td>2</td><td>50</td><td>0</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>77</td><td>34</td><td>43</td><td>4</td><td>57</td><td>1</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td>39%</td><td>83</td><td>32</td><td>51</td><td>2</td><td>62</td><td>3</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>72</td><td>32</td><td>40</td><td>7</td><td>54</td><td>8</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html">High Five: Recognising human interactions in TV shows</a></td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>91</td><td>31</td><td>60</td><td>11</td><td>64</td><td>1</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>68</td><td>30</td><td>38</td><td>8</td><td>49</td><td>7</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 
'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>60%</td><td>50</td><td>30</td><td>19</td><td>2</td><td>40</td><td>4</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>60%</td><td>48</td><td>29</td><td>19</td><td>2</td><td>45</td><td>2</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>47%</td><td>60</td><td>28</td><td>32</td><td>0</td><td>36</td><td>6</td></tr><tr><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td><td>gavab_db</td><td>Gavab</td><td><a href="papers/42505464808dfb446f521fc6ff2cfeffd4d68ff1.html">Expression invariant 3D face recognition with a Morphable Model</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376', 'linkType': 'ieee'}">[pdf]</a></td><td>2008 8th IEEE International Conference on Automatic Face & Gesture 
Recognition</td><td></td><td></td><td></td><td></td><td>29%</td><td>94</td><td>27</td><td>67</td><td>10</td><td>57</td><td>5</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>18%</td><td>151</td><td>27</td><td>124</td><td>9</td><td>78</td><td>2</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>73</td><td>26</td><td>47</td><td>6</td><td>39</td><td>16</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html">Recognize complex events from static images by fusing deep channels</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>58%</td><td>45</td><td>26</td><td>19</td><td>1</td><td>30</td><td>12</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html">Person Re-identification in the Wild</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Technology Sydney</td><td>-33.88096510</td><td>151.20107299</td><td>38%</td><td>65</td><td>25</td><td>40</td><td>1</td><td>46</td><td>0</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard 
University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html">Automatic 3D face authentication</a></td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>25%</td><td>95</td><td>24</td><td>71</td><td>8</td><td>60</td><td>2</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td>edu</td><td>Jacobs University</td><td>53.41291480</td><td>-2.96897915</td><td>44%</td><td>52</td><td>23</td><td>29</td><td>5</td><td>37</td><td>4</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html">Object Detection Combining Recognition and Segmentation</a></td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>23%</td><td>101</td><td>23</td><td>78</td><td>11</td><td>58</td><td>0</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><a href="http://doi.acm.org/10.1145/2072572.2072590">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>122</td><td>22</td><td>100</td><td>11</td><td>71</td><td>1</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>24%</td><td>89</td><td>21</td><td>46</td><td>2</td><td>20</td><td>14</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>24%</td><td>89</td><td>21</td><td>46</td><td>2</td><td>20</td><td>14</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB 
KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>24%</td><td>89</td><td>21</td><td>46</td><td>2</td><td>20</td><td>14</td></tr><tr><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/eb027969f9310e0ae941e2adee2d42cdf07d938c.html">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1710.08092.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>38%</td><td>56</td><td>21</td><td>35</td><td>6</td><td>50</td><td>3</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html">The MUG facial expression database</a></td><td><a href="http://ieeexplore.ieee.org/document/5617662/">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>40.62984145</td><td>22.95889350</td><td>28%</td><td>68</td><td>19</td><td>49</td><td>5</td><td>28</td><td>19</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>22%</td><td>88</td><td>19</td><td>43</td><td>4</td><td>42</td><td>2</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>22%</td><td>88</td><td>19</td><td>43</td><td>4</td><td>42</td><td>2</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html">The intrinsic memorability of face photographs.</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Journal of experimental psychology. 
General</td><td></td><td></td><td></td><td></td><td>36%</td><td>47</td><td>17</td><td>30</td><td>3</td><td>33</td><td>1</td></tr><tr><td>69a68f9cf874c69e2232f47808016c2736b90c35</td><td>celeba_plus</td><td>CelebFaces+</td><td><a href="papers/69a68f9cf874c69e2232f47808016c2736b90c35.html">Learning Deep Representation for Imbalanced Classification</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>33%</td><td>51</td><td>17</td><td>34</td><td>1</td><td>39</td><td>2</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>39%</td><td>44</td><td>17</td><td>27</td><td>0</td><td>36</td><td>0</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html">Violent flows: Real-time detection of violent crowd behavior</a></td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>20%</td><td>83</td><td>17</td><td>66</td><td>6</td><td>42</td><td>2</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>CAFE</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>33</td><td>16</td><td>17</td><td>3</td><td>28</td><td>1</td></tr><tr><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/c34532fe6bfbd1e6df477c9ffdbb043b77e7804d.html">A 3D Morphable Eye Region Model for Gaze Estimation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>67%</td><td>24</td><td>16</td><td>8</td><td>0</td><td>18</td><td>4</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html">The CMU Face In Action (FIA) Database</a></td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>55</td><td>16</td><td>39</td><td>5</td><td>38</td><td>7</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a 
href="https://doi.org/10.1007/s11263-013-0672-6">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>1</td><td>18</td><td>1</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>20</td><td>0</td></tr><tr><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/a5a44a32a91474f00a3cda671a802e87c899fbb4.html">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1801.03150.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>25</td><td>0</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html">RONCHI AND PERONA: DESCRIBING COMMON HUMAN VISUAL ACTIONS IN IMAGES 1 Describing Common Human Visual Actions in Images</a></td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>26</td><td>14</td><td>12</td><td>0</td><td>25</td><td>0</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html">End-to-End Deep Learning for Person Search</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf', 'linkType': 's2'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>34%</td><td>41</td><td>14</td><td>27</td><td>2</td><td>27</td><td>0</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html">Personalizing Human Video Pose Estimation</a></td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>32</td><td>14</td><td>18</td><td>2</td><td>27</td><td>0</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a 
href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html">UMB-DB: A database of partially occluded 3D faces</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>29%</td><td>45</td><td>13</td><td>32</td><td>2</td><td>20</td><td>3</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>31%</td><td>39</td><td>12</td><td>27</td><td>2</td><td>27</td><td>7</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td></td><td></td><td></td><td></td><td>16%</td><td>75</td><td>12</td><td>63</td><td>6</td><td>25</td><td>8</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html">Fine-grained evaluation on face detection in the wild</a></td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Chinese Academy of Sciences</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>13</td><td>4</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>16%</td><td>77</td><td>12</td><td>39</td><td>3</td><td>32</td><td>0</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>16%</td><td>77</td><td>12</td><td>39</td><td>3</td><td>32</td><td>0</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html">Consistent Re-identification in a Camera Network</a></td><td><a 
href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>45</td><td>12</td><td>33</td><td>7</td><td>34</td><td>1</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td>21%</td><td>58</td><td>12</td><td>46</td><td>7</td><td>40</td><td>1</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>67%</td><td>18</td><td>12</td><td>6</td><td>0</td><td>16</td><td>0</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1705.00393.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>41%</td><td>27</td><td>11</td><td>16</td><td>2</td><td>22</td><td>4</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1705.00393.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>41%</td><td>27</td><td>11</td><td>16</td><td>2</td><td>22</td><td>4</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>0</td><td>18</td><td>0</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a 
href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>25%</td><td>40</td><td>10</td><td>30</td><td>0</td><td>20</td><td>6</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>15</td><td>3</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>11</td><td>2</td></tr><tr><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td><td>agedb</td><td>AgeDB</td><td><a href="papers/6dcf418c778f528b5792104760f1fbfe90c6dd6a.html">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>82%</td><td>11</td><td>9</td><td>2</td><td>0</td><td>10</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html">Fashion Landmark Detection in the Wild</a></td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong 
Kong</td><td>22.42031295</td><td>114.20788644</td><td>39%</td><td>23</td><td>9</td><td>8</td><td>0</td><td>15</td><td>0</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html">Fashion Landmark Detection in the Wild</a></td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>39%</td><td>23</td><td>9</td><td>8</td><td>0</td><td>15</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html">Automated Human Identification Using Ear Imaging</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>13%</td><td>70</td><td>9</td><td>61</td><td>6</td><td>28</td><td>1</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>10</td><td>4</td></tr><tr><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td><td>nudedetection</td><td>Nude Detection</td><td><a href="papers/7ace44190729927e5cb0dd5d363fcae966fe13f7.html">A bag-of-features approach based on Hue-SIFT descriptor for nude detection</a></td><td><a href="http://ieeexplore.ieee.org/document/7077625/">[pdf]</a></td><td>2009 17th European Signal Processing Conference</td><td></td><td></td><td></td><td></td><td>18%</td><td>51</td><td>9</td><td>42</td><td>1</td><td>18</td><td>0</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>20</td><td>8</td><td>12</td><td>2</td><td>16</td><td>0</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928', 'linkType': 'ieee'}">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and 
Systems</td><td></td><td></td><td></td><td></td><td>25%</td><td>32</td><td>8</td><td>24</td><td>3</td><td>16</td><td>1</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>47%</td><td>17</td><td>8</td><td>9</td><td>1</td><td>12</td><td>1</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html">Ear recognition: More than a survey</a></td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td>29%</td><td>24</td><td>7</td><td>17</td><td>0</td><td>11</td><td>0</td></tr><tr><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td><td>camel</td><td>CAMEL</td><td><a href="papers/5801690199c1917fa58c35c3dead177c0b8f9f2d.html">Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf">[pdf]</a></td><td>Remote Sensing</td><td></td><td></td><td></td><td></td><td>37%</td><td>19</td><td>7</td><td>12</td><td>1</td><td>16</td><td>0</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td></td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td>22%</td><td>32</td><td>7</td><td>25</td><td>2</td><td>17</td><td>0</td></tr><tr><td>ec792ad2433b6579f2566c932ee414111e194537</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/ec792ad2433b6579f2566c932ee414111e194537.html">Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1711.08565.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>50%</td><td>14</td><td>7</td><td>7</td><td>1</td><td>11</td><td>0</td></tr><tr><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td><td>pornodb</td><td>Pornography DB</td><td><a href="papers/b92a1ed9622b8268ae3ac9090e25789fc41cc9b8.html">Pooling in image representation: The visual codeword point of view</a></td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>9%</td><td>77</td><td>7</td><td>70</td><td>7</td><td>43</td><td>2</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html">Three-dimensional face recognition: an eigensurface approach</a></td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. 
ICIP '04.</td><td></td><td></td><td></td><td></td><td>19%</td><td>36</td><td>7</td><td>29</td><td>4</td><td>25</td><td>1</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html">Recognizing disguised faces</a></td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>24</td><td>6</td><td>18</td><td>0</td><td>16</td><td>1</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html">IARPA Janus Benchmark-B Face Dataset</a></td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>24%</td><td>25</td><td>6</td><td>19</td><td>6</td><td>21</td><td>3</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.02459.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td>edu</td><td>South China University of Technology</td><td>23.05020420</td><td>113.39880323</td><td>43%</td><td>14</td><td>6</td><td>8</td><td>3</td><td>5</td><td>7</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>24%</td><td>21</td><td>5</td><td>16</td><td>3</td><td>11</td><td>1</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><a 
href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>45.50397610</td><td>-73.57496870</td><td>28%</td><td>18</td><td>5</td><td>6</td><td>0</td><td>7</td><td>4</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><a href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>45.50397610</td><td>-73.57496870</td><td>28%</td><td>18</td><td>5</td><td>6</td><td>0</td><td>7</td><td>4</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html">Re-identify people in wide area camera network</a></td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>9%</td><td>55</td><td>5</td><td>50</td><td>2</td><td>35</td><td>0</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1605.09653.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE transactions on pattern analysis and machine intelligence</td><td></td><td></td><td></td><td></td><td>27%</td><td>15</td><td>4</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td><td>buhmap_db</td><td>BUHMAP-DB </td><td><a href="papers/014b8df0180f33b9fea98f34ae611c6447d761d2.html">Facial feature tracking and expression recognition for sign language</a></td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf">[pdf]</a></td><td>2008 23rd International Symposium on Computer and Information Sciences</td><td></td><td></td><td></td><td></td><td>16%</td><td>25</td><td>4</td><td>21</td><td>1</td><td>10</td><td>2</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html">Adaptive Image Sampling and Windows Classification for On–board Pedestrian Detection</a></td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>44</td><td>4</td><td>40</td><td>1</td><td>21</td><td>0</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a 
href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>14%</td><td>29</td><td>4</td><td>25</td><td>3</td><td>17</td><td>0</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><a href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Michigan State University</td><td>42.71856800</td><td>-84.47791571</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>5</td><td>0</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html">Faces in Places: compound query retrieval</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>0</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html">ChaLearn looking at people: A review of events and resources</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1701.02664.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td>30%</td><td>10</td><td>3</td><td>7</td><td>1</td><td>6</td><td>0</td></tr><tr><td>a5acda0e8c0937bfed013e6382da127103e41395</td><td>disfa</td><td>DISFA</td><td><a href="papers/a5acda0e8c0937bfed013e6382da127103e41395.html">Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>38%</td><td>8</td><td>3</td><td>5</td><td>1</td><td>5</td><td>0</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217', 'linkType': 'ieee'}">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td>33%</td><td>9</td><td>3</td><td>6</td><td>2</td><td>9</td><td>0</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a 
href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html">Large scale unconstrained open set face database</a></td><td><a href="http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of Colorado at Colorado Springs</td><td>38.89646790</td><td>-104.80505940</td><td>60%</td><td>5</td><td>3</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html">USED: a large-scale social event detection dataset</a></td><td><a href="http://doi.acm.org/10.1145/2910017.2910624">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html">VADANA: A dense dataset for facial image analysis</a></td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>19%</td><td>16</td><td>3</td><td>13</td><td>0</td><td>6</td><td>6</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf">[pdf]</a></td><td></td><td>edu</td><td>California Institute of Technology</td><td>34.13710185</td><td>-118.12527487</td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>6</td><td>1</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html">Face swapping: automatically replacing faces in photographs</a></td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td>ACM Trans. Graph.</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html">Face swapping: automatically replacing faces in photographs</a></td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td>ACM Trans. 
Graph.</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html">Visual Kinship Recognition of Families in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>67%</td><td>3</td><td>2</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/6dbe8e5121c534339d6e41f8683e85f87e6abf81.html">Clothing Cosegmentation for Shopping Images With Cluttered Background</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>33%</td><td>6</td><td>2</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td><td>ilids_vid_reid</td><td>iLIDS-VID</td><td><a href="papers/99eb4cea0d9bc9fe777a5c5172f8638a37a7f262.html">Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</a></td><td><a href="https://doi.org/10.1109/LSP.2016.2574323">[pdf]</a></td><td>IEEE Signal Processing Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>4</td><td>0</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html">Large Age-Gap face verification by feature injection in deep networks</a></td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>2</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market1203</td><td>Market 1203</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html">Investigating Open-World Person Re-identi cation Using a Drone</a></td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>3</td><td>0</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a 
href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html">How to Take a Good Selfie?</a></td><td><a href="http://doi.acm.org/10.1145/2733373.2806365">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>czech_news_agency</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>10%</td><td>10</td><td>1</td><td>9</td><td>0</td><td>4</td><td>2</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html">A Multi-modal Graphical Model for Scene Analysis</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td>12%</td><td>8</td><td>1</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td><td>face_research_lab</td><td>Face Research Lab London</td><td><a href="papers/c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8.html">Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</a></td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td>Scientific reports</td><td>edu</td><td>University College London</td><td>51.52316070</td><td>-0.12820370</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><a href="http://doi.acm.org/10.1145/2676440.2676443">[pdf]</a></td><td></td><td>edu</td><td>University of Kentucky</td><td>38.03337420</td><td>-84.50177580</td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html">Indian Face Age Database : A Database for Face Recognition with Age Variation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td><td>megaage</td><td>MegaAge</td><td><a href="papers/d80a3d1f3a438e02a6685e66ee908446766fefa9.html">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1708.09687.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese University of Hong 
Kong</td><td>22.42031295</td><td>114.20788644</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html">Spoofing faces using makeup: An investigative study</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>43.61581310</td><td>7.06838000</td><td>20%</td><td>5</td><td>1</td><td>4</td><td>0</td><td>1</td><td>2</td></tr><tr><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/fb82681ac5d3487bd8e52dbb3d1fa220eeac855e.html">1 Network Notebook</a></td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>11</td><td>1</td><td>10</td><td>1</td><td>10</td><td>0</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1703.06283.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>8%</td><td>12</td><td>1</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html">ReSEED: social event dEtection dataset</a></td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>17%</td><td>6</td><td>1</td><td>5</td><td>1</td><td>1</td><td>1</td></tr><tr><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709.html">Learning to predict human behaviour in crowded scenes</a></td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>20%</td><td>5</td><td>1</td><td>4</td><td>1</td><td>5</td><td>0</td></tr><tr><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/9696ad8b164f5e10fcfe23aacf74bd6168aebb15.html">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1712.01443.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/f152b6ee251cca940dd853c54e6a7b78fbc6b235.html">Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</a></td><td><a href="{'url': 'http://dl.acm.org/citation.cfm?id=3232665', 'linkType': 
'acm'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td><td>bjut_3d</td><td>BJUT-3D</td><td><a href="papers/1ed1a49534ad8dd00f81939449f6389cfbc25321.html">A novel face recognition method based on 3D face model</a></td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202">[pdf]</a></td><td>2007 IEEE International Conference on Robotics and Biomimetics (ROBIO)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>1</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4.html">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1805.07193.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html">FDDB: A Benchmark for Face Detection in Unconstrained Settings</a></td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td><td>fei</td><td>FEI</td><td><a href="papers/b6b1b0632eb9d4ab1427278f5e5c46f97753c73d.html">Generalização cartográfica automatizada para um banco de dados cadastral</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html">A Multi - camera video data set for research on High - Definition surveillance</a></td><td><a 
href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html">A Multi - camera video data set for research on High - Definition surveillance</a></td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd.html">High-resolution comprehensive 3-D dynamic database for facial articulation analysis</a></td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>5</td><td>0</td><td>5</td><td>0</td><td>1</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/ad62c6e17bc39b4dec20d32f6ac667ae42d2c118.html">A Synchronization Ground Truth for the Jiku Mobile Video Dataset</a></td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html">Component-Based Face Recognition with 3D Morphable Models</a></td><td><a href="http://www.bheisele.com/avbpa2003.pdf">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td>0%</td><td>12</td><td>0</td><td>12</td><td>0</td><td>8</td><td>0</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html">Competitive affective gaming: winning with a smile</a></td><td><a 
href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html">Competitive affective gaming: winning with a smile</a></td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html">PETS 2017: Dataset and Challenge</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>8</td><td>0</td><td>2</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku</td><td>PKU</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>1</td><td>0</td></tr><tr><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/c866a2afc871910e3282fd9498dce4ab20f6a332.html">Surveillance Face Recognition Challenge</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.09691.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td><td>scface</td><td>SCface</td><td><a href="papers/f3b84a03985de3890b400b68e2a92c0a00afd9d0.html">Large Variability Surveillance Camera Face Database</a></td><td><span class="gray">[pdf]</a></td><td>2015 Seventh International Conference on Computational Intelligence, Modelling and Simulation (CIMSim)</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.04326.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>377f2b65e6a9300448bdccf678cde59449ecd337</td><td>ufdd</td><td>UFDD</td><td><a href="papers/377f2b65e6a9300448bdccf678cde59449ecd337.html">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.10275.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Johns Hopkins University</td><td>39.32905300</td><td>-76.61942500</td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td>umd_faces</td><td>UMD</td><td><a 
href="papers/447d8893a4bdc29fa1214e53499ffe67b28a6db5.html">Electronic Transport in Quantum Confined Systems</a></td><td><a href="http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td>umd_faces</td><td>UMD</td><td><a href="papers/447d8893a4bdc29fa1214e53499ffe67b28a6db5.html">Electronic Transport in Quantum Confined Systems</a></td><td><a href="http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html">Re-identification of pedestrians with variable occlusion and scale</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>10</td><td>0</td><td>10</td><td>2</td><td>6</td><td>0</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><a href="http://doi.acm.org/10.1145/2996913.2996997">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>77c81c13a110a341c140995bedb98101b9e84f7f</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/77c81c13a110a341c140995bedb98101b9e84f7f.html">WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td></td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html">WLFDB: Weakly Labeled Face Databases</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html">YawDD: a yawning detection dataset</a></td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>14</td><td>0</td><td>14</td><td>1</td><td>2</td><td>1</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/reports/report_index.html b/scraper/reports/report_index.html
index ea6b5111..933f2953 100644
--- a/scraper/reports/report_index.html
+++ b/scraper/reports/report_index.html
@@ -1 +1 @@
-<!doctype html><html><head><meta charset='utf-8'><title>All Papers</title><link rel='stylesheet' href='reports.css'></head><body><h2>All Papers</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/fb82681ac5d3487bd8e52dbb3d1fa220eeac855e.html">1 Network Notebook</a></td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>11</td><td>1</td><td>10</td><td>1</td><td>10</td><td>0</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866', 'linkType': 'ieee'}">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>50%</td><td>356</td><td>179</td><td>177</td><td>21</td><td>299</td><td>3</td></tr><tr><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2f5d44dc3e1b5955942133ff872ebd31716ec604.html">2D and 3D face recognition: A survey</a></td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>15%</td><td>389</td><td>57</td><td>332</td><td>28</td><td>198</td><td>17</td></tr><tr><td>e4754afaa15b1b53e70743880484b8d0736990ff</td><td>fiw_300</td><td>300-W</td><td><a href="papers/e4754afaa15b1b53e70743880484b8d0736990ff.html">300 Faces In-The-Wild Challenge: database and results</a></td><td><a href="{'url': 'http://doi.org/10.1016/j.imavis.2016.01.002', 'linkType': 'doi'}">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>45%</td><td>114</td><td>51</td><td>63</td><td>10</td><td>70</td><td>31</td></tr><tr><td>044d9a8c61383312cdafbcc44b9d00d650b21c70</td><td>fiw_300</td><td>300-W</td><td><a href="papers/044d9a8c61383312cdafbcc44b9d00d650b21c70.html">300 Faces in-the-Wild Challenge: The First Facial Landmark Localization Challenge</a></td><td><a href="https://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_iccv_2013_300_w.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>56%</td><td>285</td><td>159</td><td>125</td><td>28</td><td>188</td><td>82</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><a href="http://doi.acm.org/10.1145/2072572.2072590">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>122</td><td>22</td><td>100</td><td>11</td><td>71</td><td>1</td></tr><tr><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td><td>4dfab</td><td>4DFAB</td><td><a 
href="papers/9696ad8b164f5e10fcfe23aacf74bd6168aebb15.html">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1712.01443.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>0</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>31%</td><td>39</td><td>12</td><td>27</td><td>2</td><td>27</td><td>7</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><a href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td>41%</td><td>323</td><td>131</td><td>192</td><td>29</td><td>221</td><td>25</td></tr><tr><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/c34532fe6bfbd1e6df477c9ffdbb043b77e7804d.html">A 3D Morphable Eye Region Model for Gaze Estimation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>67%</td><td>24</td><td>16</td><td>8</td><td>0</td><td>18</td><td>4</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html">A 3D facial expression database for facial behavior research</a></td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>24%</td><td>555</td><td>131</td><td>424</td><td>47</td><td>283</td><td>48</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td>21%</td><td>58</td><td>12</td><td>46</td><td>7</td><td>40</td><td>1</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td>edu</td><td>Jacobs 
University</td><td>53.41291480</td><td>-2.96897915</td><td>44%</td><td>52</td><td>23</td><td>29</td><td>5</td><td>37</td><td>4</td></tr><tr><td>e3e44385a71a52fd483c58eb3cdf8d03960c0b70</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/e3e44385a71a52fd483c58eb3cdf8d03960c0b70.html">A Hierarchical Graphical Model for Recognizing Human Actions and Interactions in Video</a></td><td><a href="http://pdfs.semanticscholar.org/e3e4/4385a71a52fd483c58eb3cdf8d03960c0b70.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html">A Multi - camera video data set for research on High - Definition surveillance</a></td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html">A Multi-modal Graphical Model for Scene Analysis</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td>12%</td><td>8</td><td>1</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>0</td><td>18</td><td>0</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr><tr><td>3b4ec8af470948a72a6ed37a9fd226719a874ebc</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/3b4ec8af470948a72a6ed37a9fd226719a874ebc.html">A Spatio-Temporal Appearance Representation for Video-Based Pedestrian Re-Identification</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.434">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>32%</td><td>85</td><td>27</td><td>58</td><td>9</td><td>51</td><td>0</td></tr><tr><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/ad62c6e17bc39b4dec20d32f6ac667ae42d2c118.html">A Synchronization Ground Truth for the Jiku Mobile Video Dataset</a></td><td><a 
href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html">A Systematic Evaluation and Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1605.09653.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE transactions on pattern analysis and machine intelligence</td><td></td><td></td><td></td><td></td><td>27%</td><td>15</td><td>4</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td><td>nudedetection</td><td>Nude Detection</td><td><a href="papers/7ace44190729927e5cb0dd5d363fcae966fe13f7.html">A bag-of-features approach based on Hue-SIFT descriptor for nude detection</a></td><td><a href="http://ieeexplore.ieee.org/document/7077625/">[pdf]</a></td><td>2009 17th European Signal Processing Conference</td><td></td><td></td><td></td><td></td><td>18%</td><td>51</td><td>9</td><td>42</td><td>1</td><td>18</td><td>0</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html">A data-driven approach to cleaning large face datasets</a></td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>46%</td><td>123</td><td>56</td><td>67</td><td>6</td><td>95</td><td>21</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>36%</td><td>151</td><td>54</td><td>97</td><td>7</td><td>85</td><td>26</td></tr><tr><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td><td>bjut_3d</td><td>BJUT-3D</td><td><a href="papers/1ed1a49534ad8dd00f81939449f6389cfbc25321.html">A novel face recognition method based on 3D face model</a></td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202">[pdf]</a></td><td>2007 IEEE International Conference on Robotics and Biomimetics (ROBIO)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>1</td></tr><tr><td>2624d84503bc2f8e190e061c5480b6aa4d89277a</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/2624d84503bc2f8e190e061c5480b6aa4d89277a.html">AFEW-VA database for valence and arousal estimation in-the-wild</a></td><td><a href="http://pdfs.semanticscholar.org/2624/d84503bc2f8e190e061c5480b6aa4d89277a.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>47%</td><td>15</td><td>7</td><td>8</td><td>1</td><td>10</td><td>3</td></tr><tr><td>2ad0ee93d029e790ebb50574f403a09854b65b7e</td><td>yale_faces</td><td>YaleFaces</td><td><a 
href="papers/2ad0ee93d029e790ebb50574f403a09854b65b7e.html">Acquiring linear subspaces for face recognition under variable lighting</a></td><td><a href="http://vision.cornell.edu/se3/wp-content/uploads/2014/09/pami05.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>251</td><td>748</td><td>110</td><td>509</td><td>113</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html">Adaptive Image Sampling and Windows Classification for On–board Pedestrian Detection</a></td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>44</td><td>4</td><td>40</td><td>1</td><td>21</td><td>0</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><a href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>73</td><td>26</td><td>47</td><td>6</td><td>39</td><td>16</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html">Age and Gender Estimation of Unfiltered Faces</a></td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td></td><td></td><td></td><td></td><td>43%</td><td>168</td><td>72</td><td>96</td><td>7</td><td>89</td><td>53</td></tr><tr><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td><td>agedb</td><td>AgeDB</td><td><a href="papers/6dcf418c778f528b5792104760f1fbfe90c6dd6a.html">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>82%</td><td>11</td><td>9</td><td>2</td><td>0</td><td>10</td><td>0</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>imm_face</td><td>IMM Face Dataset</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV 
Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>muct</td><td>MUCT</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>22%</td><td>88</td><td>19</td><td>43</td><td>4</td><td>42</td><td>2</td></tr><tr><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td><td>face_research_lab</td><td>Face Research Lab London</td><td><a href="papers/c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8.html">Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</a></td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td>Scientific reports</td><td>edu</td><td>University College London</td><td>51.52316070</td><td>-0.12820370</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html">Appearance-based gaze estimation in the wild</a></td><td><a href="https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Max Planck Institute for Informatics</td><td>49.25795660</td><td>7.04577417</td><td>38%</td><td>138</td><td>52</td><td>86</td><td>3</td><td>94</td><td>7</td></tr><tr><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td><td>camel</td><td>CAMEL</td><td><a href="papers/5801690199c1917fa58c35c3dead177c0b8f9f2d.html">Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf">[pdf]</a></td><td>Remote Sensing</td><td></td><td></td><td></td><td></td><td>37%</td><td>19</td><td>7</td><td>12</td><td>1</td><td>16</td><td>0</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html">Attribute and simile classifiers for face verification</a></td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>51%</td><td>894</td><td>454</td><td>440</td><td>55</td><td>587</td><td>222</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html">Automated Human 
Identification Using Ear Imaging</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>13%</td><td>70</td><td>9</td><td>61</td><td>6</td><td>28</td><td>1</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html">Automatic 3D face authentication</a></td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf">[pdf]</a></td><td>Image Vision Comput.</td><td></td><td></td><td></td><td></td><td>25%</td><td>95</td><td>24</td><td>71</td><td>8</td><td>60</td><td>2</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="https://doi.org/10.1007/s11263-013-0672-6">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>1</td><td>18</td><td>1</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>60%</td><td>50</td><td>30</td><td>19</td><td>2</td><td>40</td><td>4</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html">Bosphorus Database for 3D Face Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>328</td><td>58</td><td>270</td><td>18</td><td>143</td><td>37</td></tr><tr><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/214c966d1f9c2a4b66f4535d9a0d4078e63a5867.html">Brainwash: A Data System for Feature Engineering</a></td><td><a 
href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>57</td><td>34</td><td>23</td><td>2</td><td>50</td><td>0</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html">ChaLearn looking at people: A review of events and resources</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1701.02664.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td>30%</td><td>10</td><td>3</td><td>7</td><td>1</td><td>6</td><td>0</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>47%</td><td>60</td><td>28</td><td>32</td><td>0</td><td>36</td><td>6</td></tr><tr><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/6dbe8e5121c534339d6e41f8683e85f87e6abf81.html">Clothing Cosegmentation for Shopping Images With Cluttered Background</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>33%</td><td>6</td><td>2</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for 
Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf">[pdf]</a></td><td></td><td>edu</td><td>Kyushu University</td><td>33.59914655</td><td>130.22359848</td><td>36%</td><td>848</td><td>308</td><td>540</td><td>56</td><td>413</td><td>255</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>-35.27769990</td><td>149.11852700</td><td>33%</td><td>175</td><td>58</td><td>66</td><td>7</td><td>54</td><td>49</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html">Competitive affective gaming: winning with a smile</a></td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html">Component-Based Face Recognition with 3D Morphable Models</a></td><td><a href="http://www.bheisele.com/avbpa2003.pdf">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td>0%</td><td>12</td><td>0</td><td>12</td><td>0</td><td>8</td><td>0</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf">[pdf]</a></td><td></td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>38%</td><td>999</td><td>380</td><td>619</td><td>75</td><td>555</td><td>252</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html">Consistent Re-identification in a Camera Network</a></td><td><a 
href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>45</td><td>12</td><td>33</td><td>7</td><td>34</td><td>1</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>c06b13d0ec3f5c43e2782cd22542588e233733c3</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/c06b13d0ec3f5c43e2782cd22542588e233733c3.html">Crowdsourcing facial expressions for affective-interaction</a></td><td><a href="https://doi.org/10.1016/j.cviu.2016.02.001">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390', 'linkType': 'ieee'}">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td>44%</td><td>102</td><td>45</td><td>54</td><td>6</td><td>62</td><td>28</td></tr><tr><td>10195a163ab6348eef37213a46f60a3d87f289c5</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/10195a163ab6348eef37213a46f60a3d87f289c5.html">Deep Expectation of Real and Apparent Age from a Single Image Without Facial Landmarks</a></td><td><a href="https://doi.org/10.1007/s11263-016-0940-3">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>133</td><td>59</td><td>74</td><td>14</td><td>90</td><td>28</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html">Deep Face Recognition</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>621</td><td>156</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html">Deep Learning Face Attributes in the Wild</a></td><td><a href="http://arxiv.org/pdf/1411.7766v2.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong 
Kong</td><td>22.42031295</td><td>114.20788644</td><td>42%</td><td>808</td><td>340</td><td>468</td><td>69</td><td>666</td><td>50</td></tr><tr><td>18010284894ed0edcca74e5bf768ee2e15ef7841</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/18010284894ed0edcca74e5bf768ee2e15ef7841.html">DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780493', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>47%</td><td>150</td><td>71</td><td>79</td><td>4</td><td>111</td><td>8</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk03</td><td>CUHK03</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>35%</td><td>512</td><td>180</td><td>332</td><td>29</td><td>323</td><td>4</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html">Depth and Appearance for Mobile Scene Analysis</a></td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>25%</td><td>319</td><td>79</td><td>240</td><td>27</td><td>192</td><td>0</td></tr><tr><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td><td>a_pascal_yahoo</td><td>aPascal</td><td><a href="papers/2e384f057211426ac5922f1b33d2aa8df5d51f57.html">Describing objects by their attributes</a></td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>727</td><td>73</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>berkeley_pose</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html">Describing people: A poselet-based approach to attribute classification</a></td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>43%</td><td>221</td><td>96</td><td>125</td><td>14</td><td>160</td><td>23</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf">[pdf]</a></td><td></td><td>edu</td><td>California Institute of Technology</td><td>34.13710185</td><td>-118.12527487</td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>6</td><td>1</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html">Ear recognition: More than a survey</a></td><td><a 
href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td>29%</td><td>24</td><td>7</td><td>17</td><td>0</td><td>11</td><td>0</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw_a</td><td>LFW-a</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>42%</td><td>177</td><td>75</td><td>102</td><td>15</td><td>102</td><td>54</td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td>umd_faces</td><td>UMD</td><td><a href="papers/447d8893a4bdc29fa1214e53499ffe67b28a6db5.html">Electronic Transport in Quantum Confined Systems</a></td><td><a href="http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>72</td><td>32</td><td>40</td><td>7</td><td>54</td><td>8</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html">End-to-End Deep Learning for Person Search</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf', 'linkType': 's2'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>34%</td><td>41</td><td>14</td><td>27</td><td>2</td><td>27</td><td>0</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>584</td><td>159</td><td>425</td><td>38</td><td>336</td><td>9</td></tr><tr><td>2258e01865367018ed6f4262c880df85b94959f8</td><td>mot</td><td>MOT</td><td><a href="papers/2258e01865367018ed6f4262c880df85b94959f8.html">Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics</a></td><td><a href="http://pdfs.semanticscholar.org/2e0b/00f4043e2d4b04c59c88bb54bcd907d0dcd4.pdf">[pdf]</a></td><td>EURASIP J. 
Image and Video Processing</td><td></td><td></td><td></td><td></td><td>20%</td><td>586</td><td>119</td><td>467</td><td>48</td><td>336</td><td>3</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1703.06283.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>8%</td><td>12</td><td>1</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html">Exploring Models and Data for Image Question Answering</a></td><td><a href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>191</td><td>83</td><td>108</td><td>12</td><td>163</td><td>1</td></tr><tr><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td><td>gavab_db</td><td>Gavab</td><td><a href="papers/42505464808dfb446f521fc6ff2cfeffd4d68ff1.html">Expression invariant 3D face recognition with a Morphable Model</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376', 'linkType': 'ieee'}">[pdf]</a></td><td>2008 8th IEEE International Conference on Automatic Face & Gesture Recognition</td><td></td><td></td><td></td><td></td><td>29%</td><td>94</td><td>27</td><td>67</td><td>10</td><td>57</td><td>5</td></tr><tr><td>a5acda0e8c0937bfed013e6382da127103e41395</td><td>disfa</td><td>DISFA</td><td><a href="papers/a5acda0e8c0937bfed013e6382da127103e41395.html">Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>38%</td><td>8</td><td>3</td><td>5</td><td>1</td><td>5</td><td>0</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html">FDDB: A Benchmark for Face Detection in Unconstrained Settings</a></td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td><td>sheffield</td><td>Sheffield Face</td><td><a href="papers/3607afdb204de9a5a9300ae98aa4635d9effcda2.html">Face Description with Local Binary Patterns: Application to Face Recognition</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>999</td><td>238</td><td>761</td><td>65</td><td>483</td><td>87</td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/28312c3a47c1be3a67365700744d3d6665b86f22.html">Face Recognition: A Literature Survey1</a></td><td><a 
href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>105</td><td>265</td><td>32</td><td>217</td><td>39</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html">Face detection, pose estimation, and landmark localization in the wild</a></td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of California, Irvine</td><td>33.64319010</td><td>-117.84016494</td><td>52%</td><td>999</td><td>521</td><td>478</td><td>59</td><td>607</td><td>273</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html">Face recognition in unconstrained videos with matched background similarity</a></td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Open University of Israel</td><td>32.77824165</td><td>34.99565673</td><td>50%</td><td>485</td><td>244</td><td>240</td><td>32</td><td>290</td><td>140</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html">Face swapping: automatically replacing faces in photographs</a></td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td>ACM Trans. Graph.</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>4c170a0dcc8de75587dae21ca508dab2f9343974</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/4c170a0dcc8de75587dae21ca508dab2f9343974.html">FaceTracer: A Search Engine for Large Collections of Images with Faces</a></td><td><a href="http://pdfs.semanticscholar.org/73a8/1d311eedac8dea3ca24dc15b6990fa4a725e.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>48%</td><td>218</td><td>105</td><td>113</td><td>17</td><td>146</td><td>52</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html">Faces in Places: compound query retrieval</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>0</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mafl</td><td>MAFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial Landmark Detection by Deep Multi-task Learning</a></td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>8a3c5507237957d013a0fe0f082cab7f757af6ee</td><td>mtfl</td><td>MTFL</td><td><a href="papers/8a3c5507237957d013a0fe0f082cab7f757af6ee.html">Facial Landmark Detection by Deep Multi-task 
Learning</a></td><td><a href="http://pdfs.semanticscholar.org/fcd7/1c18192928a2e0b264edd4d919ab2f8f652a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>48%</td><td>383</td><td>182</td><td>201</td><td>25</td><td>259</td><td>60</td></tr><tr><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td><td>buhmap_db</td><td>BUHMAP-DB </td><td><a href="papers/014b8df0180f33b9fea98f34ae611c6447d761d2.html">Facial feature tracking and expression recognition for sign language</a></td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf">[pdf]</a></td><td>2008 23rd International Symposium on Computer and Information Sciences</td><td></td><td></td><td></td><td></td><td>16%</td><td>25</td><td>4</td><td>21</td><td>1</td><td>10</td><td>2</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html">Fashion Landmark Detection in the Wild</a></td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>39%</td><td>23</td><td>9</td><td>8</td><td>0</td><td>15</td><td>0</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html">Fine-grained evaluation on face detection in the wild</a></td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Chinese Academy of Sciences</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>13</td><td>4</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>51%</td><td>148</td><td>76</td><td>72</td><td>8</td><td>109</td><td>3</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a href="papers/774cbb45968607a027ae4729077734db000a1ec5.html">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>47%</td><td>17</td><td>8</td><td>9</td><td>1</td><td>12</td><td>1</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>social_relation</td><td>Social 
Relation</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>42%</td><td>999</td><td>422</td><td>577</td><td>80</td><td>538</td><td>295</td></tr><tr><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td><td>kdef</td><td>KDEF</td><td><a href="papers/93884e46c49f7ae1c7c34046fbc28882f2bd6341.html">Gaze fixation and the neural circuitry of face processing in autism</a></td><td><a href="{'url': 'http://doi.org/10.1038/nn1421', 'linkType': 'nature'}">[pdf]</a></td><td>Nature Neuroscience</td><td></td><td></td><td></td><td></td><td>31%</td><td>608</td><td>190</td><td>418</td><td>92</td><td>463</td><td>0</td></tr><tr><td>2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/2eb84aaba316b095d4bb51da1a3e4365bbf9ab1d.html">Genealogical face recognition based on UB KinFace database</a></td><td><a href="https://doi.org/10.1109/CVPRW.2011.5981801">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>SUNY Buffalo</td><td>42.93362780</td><td>-78.88394479</td><td>13%</td><td>30</td><td>4</td><td>26</td><td>1</td><td>9</td><td>5</td></tr><tr><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td><td>fei</td><td>FEI</td><td><a href="papers/b6b1b0632eb9d4ab1427278f5e5c46f97753c73d.html">Generalização cartográfica automatizada para um banco de dados cadastral</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9</td><td>graz</td><td>Graz Pedestrian</td><td><a href="papers/2eed184680edcdec8a3b605ad1a3ba8e8f7cc2e9.html">Generic object recognition with boosting</a></td><td><a href="http://www.emt.tu-graz.ac.at/~pinz/onlinepapers/Reprint_Vol_28_No_3_2006.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>286</td><td>69</td><td>217</td><td>16</td><td>189</td><td>0</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><a href="http://doi.acm.org/10.1145/2676440.2676443">[pdf]</a></td><td></td><td>edu</td><td>University of Kentucky</td><td>38.03337420</td><td>-84.50177580</td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html">High Five: Recognising human interactions in TV shows</a></td><td><a 
href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>91</td><td>31</td><td>60</td><td>11</td><td>64</td><td>1</td></tr><tr><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd.html">High-resolution comprehensive 3-D dynamic database for facial articulation analysis</a></td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>5</td><td>0</td><td>5</td><td>0</td><td>1</td><td>0</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf">[pdf]</a></td><td></td><td>edu</td><td>Tohoku University</td><td>38.25309450</td><td>140.87365930</td><td>53%</td><td>91</td><td>48</td><td>43</td><td>5</td><td>60</td><td>15</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html">Histograms of oriented gradients for human detection</a></td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>22%</td><td>999</td><td>217</td><td>782</td><td>67</td><td>520</td><td>22</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html">How to Take a Good Selfie?</a></td><td><a href="http://doi.acm.org/10.1145/2733373.2806365">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>67%</td><td>18</td><td>12</td><td>6</td><td>0</td><td>16</td><td>0</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk01</td><td>CUHK01</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>258</td><td>67</td><td>191</td><td>12</td><td>141</td><td>1</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html">IARPA Janus Benchmark - C: Face Dataset and 
Protocol</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217', 'linkType': 'ieee'}">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td>33%</td><td>9</td><td>3</td><td>6</td><td>2</td><td>9</td><td>0</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html">IARPA Janus Benchmark-B Face Dataset</a></td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>24%</td><td>25</td><td>6</td><td>19</td><td>6</td><td>21</td><td>3</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td></td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td>22%</td><td>32</td><td>7</td><td>25</td><td>2</td><td>17</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html">Indian Face Age Database : A Database for Face Recognition with Age Variation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>10</td><td>4</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html">Interactive Facial Feature Localization</a></td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>52%</td><td>339</td><td>177</td><td>162</td><td>27</td><td>208</td><td>100</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html">International Conference on Automatic Face and Gesture Recognition The CMU Pose , Illumination , and Expression ( PIE ) Database</a></td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>742</td><td>330</td><td>412</td><td>61</td><td>410</td><td>232</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html">Investigating Open-World 
Person Re-identification Using a Drone</a></td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>3</td><td>0</td></tr><tr><td>2f43b614607163abf41dfe5d17ef6749a1b61304</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/2f43b614607163abf41dfe5d17ef6749a1b61304.html">Investigating the Periocular-Based Face Recognition Across Gender Transformation</a></td><td><a href="https://doi.org/10.1109/TIFS.2014.2361479">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td>edu</td><td>University of North Carolina at Wilmington</td><td>34.22498270</td><td>-77.86907744</td><td>38%</td><td>13</td><td>5</td><td>3</td><td>0</td><td>3</td><td>3</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>b71d1aa90dcbe3638888725314c0d56640c1fef1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/b71d1aa90dcbe3638888725314c0d56640c1fef1.html">Iranian Face Database with age, pose and expression</a></td><td><span class="gray">[pdf]</span></td><td>2007 International Conference on Machine Vision</td><td></td><td></td><td></td><td></td><td>20%</td><td>20</td><td>4</td><td>16</td><td>2</td><td>11</td><td>3</td></tr><tr><td>137aa2f891d474fce1e7a1d1e9b3aefe21e22b34</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/137aa2f891d474fce1e7a1d1e9b3aefe21e22b34.html">Is the eye region more reliable than the face?
A preliminary study of face-based recognition on a transgender dataset</a></td><td><a href="https://doi.org/10.1109/BTAS.2013.6712710">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of North Carolina Wilmington</td><td>34.23755810</td><td>-77.92701290</td><td>43%</td><td>7</td><td>3</td><td>4</td><td>1</td><td>2</td><td>3</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: Systems</td><td></td><td></td><td></td><td></td><td>16%</td><td>75</td><td>12</td><td>63</td><td>6</td><td>25</td><td>8</td></tr><tr><td>4793f11fbca4a7dba898b9fff68f70d868e2497c</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/4793f11fbca4a7dba898b9fff68f70d868e2497c.html">Kinship Verification through Transfer Learning</a></td><td><a href="http://pdfs.semanticscholar.org/4793/f11fbca4a7dba898b9fff68f70d868e2497c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>14%</td><td>66</td><td>9</td><td>39</td><td>2</td><td>18</td><td>5</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>41%</td><td>116</td><td>47</td><td>46</td><td>4</td><td>62</td><td>19</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>lfw</td><td>LFW</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a 
href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>put_face</td><td>Put Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22</td><td>lfw</td><td>LFW</td><td><a href="papers/7de6e81d775e9cd7becbfd1bd685f4e2a5eebb22.html">Labeled Faces in the Wild: A Survey</a></td><td><a href="http://pdfs.semanticscholar.org/7de6/e81d775e9cd7becbfd1bd685f4e2a5eebb22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>99</td><td>29</td><td>70</td><td>9</td><td>63</td><td>12</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html">Large Age-Gap face verification by feature injection in deep networks</a></td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>2</td></tr><tr><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td><td>scface</td><td>SCface</td><td><a href="papers/f3b84a03985de3890b400b68e2a92c0a00afd9d0.html">Large Variability Surveillance Camera Face Database</a></td><td><span class="gray">[pdf]</a></td><td>2015 Seventh International Conference on Computational Intelligence, Modelling and Simulation (CIMSim)</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html">Large scale unconstrained open set face database</a></td><td><a href="http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of Colorado at Colorado Springs</td><td>38.89646790</td><td>-104.80505940</td><td>60%</td><td>5</td><td>3</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation 
for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>69a68f9cf874c69e2232f47808016c2736b90c35</td><td>celeba_plus</td><td>CelebFaces+</td><td><a href="papers/69a68f9cf874c69e2232f47808016c2736b90c35.html">Learning Deep Representation for Imbalanced Classification</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>33%</td><td>51</td><td>17</td><td>34</td><td>1</td><td>39</td><td>2</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html">Learning Face Representation from Scratch</a></td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>53%</td><td>436</td><td>233</td><td>203</td><td>32</td><td>284</td><td>115</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>expw</td><td>ExpW</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html">Learning effective human pose estimation from inaccurate annotation</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>40%</td><td>173</td><td>70</td><td>103</td><td>9</td><td>116</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen 
PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709.html">Learning to predict human behaviour in crowded scenes</a></td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>20%</td><td>5</td><td>1</td><td>4</td><td>1</td><td>5</td><td>0</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1705.00393.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>41%</td><td>27</td><td>11</td><td>16</td><td>2</td><td>22</td><td>4</td></tr><tr><td>46a01565e6afe7c074affb752e7069ee3bf2e4ef</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/46a01565e6afe7c074affb752e7069ee3bf2e4ef.html">Local Descriptors Encoded by Fisher Vectors for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/dd1d/51c3a59cb71cbfe1433ebeb4d973f7f9ddc1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>22%</td><td>193</td><td>42</td><td>133</td><td>15</td><td>101</td><td>2</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfw_p</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>53%</td><td>521</td><td>274</td><td>247</td><td>40</td><td>321</td><td>144</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk02</td><td>CUHK02</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html">Locally Aligned Feature Transforms across Views</a></td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>24%</td><td>242</td><td>57</td><td>185</td><td>17</td><td>139</td><td>1</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>146</td><td>49</td><td>97</td><td>6</td><td>96</td><td>0</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult 
age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>167</td><td>83</td><td>84</td><td>15</td><td>131</td><td>27</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>11</td><td>2</td></tr><tr><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td><td>coco</td><td>COCO</td><td><a href="papers/696ca58d93f6404fea0fc75c62d1d7b378f47628.html">Microsoft COCO Captions: Data Collection and Evaluation Server</a></td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>46%</td><td>283</td><td>129</td><td>154</td><td>16</td><td>231</td><td>4</td></tr><tr><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/a5a44a32a91474f00a3cda671a802e87c899fbb4.html">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1801.03150.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>25</td><td>0</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a 
href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>3b5b6d19d4733ab606c39c69a889f9e67967f151</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/3b5b6d19d4733ab606c39c69a889f9e67967f151.html">Multi-camera activity correlation analysis</a></td><td><a href="http://vision.lbl.gov/Conferences/cvpr/Papers/data/papers/0163.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>138</td><td>35</td><td>103</td><td>8</td><td>76</td><td>1</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>38%</td><td>125</td><td>48</td><td>77</td><td>6</td><td>72</td><td>1</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>20</td><td>0</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a 
href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>25%</td><td>40</td><td>10</td><td>30</td><td>0</td><td>20</td><td>6</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces_news</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html">Names and faces in the news</a></td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.</td><td></td><td></td><td></td><td></td><td>41%</td><td>294</td><td>120</td><td>174</td><td>24</td><td>207</td><td>45</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>24%</td><td>21</td><td>5</td><td>16</td><td>3</td><td>11</td><td>1</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html">Nonparametric Object and Scene Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>999</td><td>305</td><td>694</td><td>93</td><td>670</td><td>9</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html">O M 4 . 1 The Subject Database 4 . 2 Experiment Plan 5 . 
1 Varying the Overlap 4 Experimental Setup 5 Parameterisation Results</a></td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>999</td><td>214</td><td>785</td><td>96</td><td>550</td><td>57</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html">Object Detection Combining Recognition and Segmentation</a></td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>23%</td><td>101</td><td>23</td><td>78</td><td>11</td><td>58</td><td>0</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html">On a large sequence-based human gait database</a></td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>148</td><td>54</td><td>94</td><td>16</td><td>98</td><td>0</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>68</td><td>30</td><td>38</td><td>8</td><td>49</td><td>7</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market1203</td><td>Market 1203</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html">Overview of the face recognition grand challenge</a></td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>253</td><td>746</td><td>110</td><td>572</td><td>64</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html">PETS 2017: Dataset and Challenge</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops 
(CVPRW)</td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>8</td><td>0</td><td>2</td><td>0</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462', 'linkType': 'ieee'}">[pdf]</a></td><td>Face and Gesture 2011</td><td></td><td></td><td></td><td></td><td>32%</td><td>184</td><td>58</td><td>126</td><td>23</td><td>112</td><td>23</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><a href="http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>University of Queensland</td><td>-27.49741805</td><td>153.01316956</td><td>30%</td><td>128</td><td>39</td><td>89</td><td>6</td><td>68</td><td>14</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html">Pedestrian Attribute Recognition At Far Distance</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>80</td><td>37</td><td>43</td><td>2</td><td>51</td><td>3</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>10%</td><td>999</td><td>96</td><td>355</td><td>26</td><td>252</td><td>5</td></tr><tr><td>1dc35905a1deff8bc74688f2d7e2f48fd2273275</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a 
href="papers/1dc35905a1deff8bc74688f2d7e2f48fd2273275.html">Pedestrian detection: A benchmark</a></td><td><a href="http://vision.ucsd.edu/~pdollar/files/papers/DollarCVPR09peds.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>17%</td><td>519</td><td>89</td><td>430</td><td>27</td><td>286</td><td>2</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html">Performance Measures and a Data Set for Multi-target, Multi-camera Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>136</td><td>58</td><td>78</td><td>6</td><td>107</td><td>0</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>mot</td><td>MOT</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html">Performance Measures and a Data Set for Multi-target, Multi-camera Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>136</td><td>58</td><td>78</td><td>6</td><td>107</td><td>0</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>352</td><td>94</td><td>258</td><td>26</td><td>195</td><td>3</td></tr><tr><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td><td>ilids_vid_reid</td><td>iLIDS-VID</td><td><a 
href="papers/99eb4cea0d9bc9fe777a5c5172f8638a37a7f262.html">Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</a></td><td><a href="https://doi.org/10.1109/LSP.2016.2574323">[pdf]</a></td><td>IEEE Signal Processing Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>4</td><td>0</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html">Person Re-identification by Video Ranking</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>25%</td><td>196</td><td>49</td><td>124</td><td>11</td><td>98</td><td>1</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html">Person Re-identification in the Wild</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Technology Sydney</td><td>-33.88096510</td><td>151.20107299</td><td>38%</td><td>65</td><td>25</td><td>40</td><td>1</td><td>46</td><td>0</td></tr><tr><td>ec792ad2433b6579f2566c932ee414111e194537</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/ec792ad2433b6579f2566c932ee414111e194537.html">Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1711.08565.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>50%</td><td>14</td><td>7</td><td>7</td><td>1</td><td>11</td><td>0</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html">Personalizing Human Video Pose Estimation</a></td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>32</td><td>14</td><td>18</td><td>2</td><td>27</td><td>0</td></tr><tr><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td><td>pornodb</td><td>Pornography DB</td><td><a href="papers/b92a1ed9622b8268ae3ac9090e25789fc41cc9b8.html">Pooling in image representation: The visual codeword point of view</a></td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf">[pdf]</a></td><td>Computer Vision and Image Understanding</td><td></td><td></td><td></td><td></td><td>9%</td><td>77</td><td>7</td><td>70</td><td>7</td><td>43</td><td>2</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>707</td><td>223</td><td>484</td><td>62</td><td>482</td><td>18</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html">Presentation and validation of the Radboud Faces 
Database</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>28%</td><td>446</td><td>127</td><td>319</td><td>43</td><td>307</td><td>19</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html">Pruning training sets for learning of object categories</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308', 'linkType': 'ieee'}">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>58%</td><td>60</td><td>35</td><td>25</td><td>5</td><td>42</td><td>12</td></tr><tr><td>377f2b65e6a9300448bdccf678cde59449ecd337</td><td>ufdd</td><td>UFDD</td><td><a href="papers/377f2b65e6a9300448bdccf678cde59449ecd337.html">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.10275.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Johns Hopkins University</td><td>39.32905300</td><td>-76.61942500</td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>48%</td><td>222</td><td>107</td><td>115</td><td>21</td><td>158</td><td>48</td></tr><tr><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td><td>megaage</td><td>MegaAge</td><td><a href="papers/d80a3d1f3a438e02a6685e66ee908446766fefa9.html">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1708.09687.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html">RONCHI AND PERONA: DESCRIBING COMMON HUMAN VISUAL ACTIONS IN IMAGES 1 Describing Common Human Visual Actions in Images</a></td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>26</td><td>14</td><td>12</td><td>0</td><td>25</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html">Re-identification of pedestrians with variable occlusion and scale</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>10</td><td>0</td><td>10</td><td>2</td><td>6</td><td>0</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a 
href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html">Re-identify people in wide area camera network</a></td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>9%</td><td>55</td><td>5</td><td>50</td><td>2</td><td>35</td><td>0</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html">ReSEED: social event dEtection dataset</a></td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>17%</td><td>6</td><td>1</td><td>5</td><td>1</td><td>1</td><td>1</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html">Recognition using visual phrases</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>41%</td><td>233</td><td>95</td><td>138</td><td>18</td><td>174</td><td>5</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html">Recognize complex events from static images by fusing deep channels</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>58%</td><td>45</td><td>26</td><td>19</td><td>1</td><td>30</td><td>12</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html">Recognizing disguised faces</a></td><td><a href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>24</td><td>6</td><td>18</td><td>0</td><td>16</td><td>1</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>498</td><td>127</td><td>371</td><td>55</td><td>319</td><td>32</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html">Robust Face Landmark Estimation under 
Occlusion</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>55%</td><td>305</td><td>167</td><td>138</td><td>16</td><td>186</td><td>95</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><a href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>45.50397610</td><td>-73.57496870</td><td>28%</td><td>18</td><td>5</td><td>6</td><td>0</td><td>7</td><td>4</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>14%</td><td>29</td><td>4</td><td>25</td><td>3</td><td>17</td><td>0</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.02459.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td>edu</td><td>South China University of Technology</td><td>23.05020420</td><td>113.39880323</td><td>43%</td><td>14</td><td>6</td><td>8</td><td>3</td><td>5</td><td>7</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.04326.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>32%</td><td>260</td><td>84</td><td>85</td><td>18</td><td>143</td><td>10</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html">Scalable Person Re-identification: A Benchmark</a></td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>38%</td><td>394</td><td>149</td><td>245</td><td>18</td><td>271</td><td>3</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a 
href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>109df0e8e5969ddf01e073143e83599228a1163f</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/109df0e8e5969ddf01e073143e83599228a1163f.html">Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</a></td><td><a href="http://dl.acm.org/citation.cfm?id=2337184">[pdf]</a></td><td>2012 39th Annual International Symposium on Computer Architecture (ISCA)</td><td></td><td></td><td></td><td></td><td>25%</td><td>192</td><td>48</td><td>144</td><td>8</td><td>99</td><td>0</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>60%</td><td>48</td><td>29</td><td>19</td><td>2</td><td>45</td><td>2</td></tr><tr><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/f152b6ee251cca940dd853c54e6a7b78fbc6b235.html">Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</a></td><td><a href="{'url': 'http://dl.acm.org/citation.cfm?id=3232665', 'linkType': 'acm'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html">Spoofing faces using makeup: An investigative study</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>43.61581310</td><td>7.06838000</td><td>20%</td><td>5</td><td>1</td><td>4</td><td>0</td><td>1</td><td>2</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><a 
href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Michigan State University</td><td>42.71856800</td><td>-84.47791571</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>5</td><td>0</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>towncenter</td><td>TownCenter</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html">Stable Multi-Target Tracking in Real-Time Surveillance Video (Preprint)</a></td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>310</td><td>64</td><td>246</td><td>24</td><td>177</td><td>4</td></tr><tr><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/c866a2afc871910e3282fd9498dce4ab20f6a332.html">Surveillance Face Recognition Challenge</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.09691.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku</td><td>PKU</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html">Swiss-System Based Cascade Ranking for Gait-Based Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>1</td><td>0</td></tr><tr><td>4d58f886f5150b2d5e48fd1b5a49e09799bf895d</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/4d58f886f5150b2d5e48fd1b5a49e09799bf895d.html">Texas 3D Face Recognition Database</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ssiai_may10.pdf">[pdf]</a></td><td>2010 IEEE Southwest Symposium on Image Analysis & Interpretation (SSIAI)</td><td></td><td></td><td></td><td></td><td>18%</td><td>61</td><td>11</td><td>50</td><td>3</td><td>36</td><td>2</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>m2vts</td><td>m2vts</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html">The CMU Face In Action (FIA) Database</a></td><td><a 
href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>55</td><td>16</td><td>39</td><td>5</td><td>38</td><td>7</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td>39%</td><td>83</td><td>32</td><td>51</td><td>2</td><td>62</td><td>3</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>CAFE</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>33</td><td>16</td><td>17</td><td>3</td><td>28</td><td>1</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>20</td><td>8</td><td>12</td><td>2</td><td>16</td><td>0</td></tr><tr><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4.html">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1805.07193.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><a 
href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td></td><td></td><td></td><td></td><td>41%</td><td>975</td><td>403</td><td>572</td><td>65</td><td>460</td><td>345</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>0c4a139bb87c6743c7905b29a3cfec27a5130652</td><td>feret</td><td>FERET</td><td><a href="papers/0c4a139bb87c6743c7905b29a3cfec27a5130652.html">The FERET Verification Testing Protocol for Face Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0c4a/139bb87c6743c7905b29a3cfec27a5130652.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>28%</td><td>112</td><td>31</td><td>81</td><td>12</td><td>76</td><td>4</td></tr><tr><td>8f02ec0be21461fbcedf51d864f944cfc42c875f</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/8f02ec0be21461fbcedf51d864f944cfc42c875f.html">The HDA+ Data Set for Research on Fully Automated Re-identification Systems</a></td><td><a href="http://pdfs.semanticscholar.org/8f02/ec0be21461fbcedf51d864f944cfc42c875f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>12%</td><td>17</td><td>2</td><td>15</td><td>2</td><td>11</td><td>0</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html">The MUG facial expression database</a></td><td><a href="http://ieeexplore.ieee.org/document/5617662/">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>40.62984145</td><td>22.95889350</td><td>28%</td><td>68</td><td>19</td><td>49</td><td>5</td><td>28</td><td>19</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html">The Mapillary Vistas Dataset for Semantic Understanding of 
Street Scenes</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>39%</td><td>44</td><td>17</td><td>27</td><td>0</td><td>36</td><td>0</td></tr><tr><td>96e0cfcd81cdeb8282e29ef9ec9962b125f379b0</td><td>megaface</td><td>MegaFace</td><td><a href="papers/96e0cfcd81cdeb8282e29ef9ec9962b125f379b0.html">The MegaFace Benchmark: 1 Million Faces for Recognition at Scale</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.527">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>55%</td><td>121</td><td>66</td><td>55</td><td>11</td><td>98</td><td>20</td></tr><tr><td>a6e695ddd07aad719001c0fc1129328452385949</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/a6e695ddd07aad719001c0fc1129328452385949.html">The New Data and New Challenges in Multimedia Research</a></td><td><span class="gray">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>36%</td><td>160</td><td>57</td><td>103</td><td>11</td><td>105</td><td>4</td></tr><tr><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td><td>voc</td><td>VOC</td><td><a href="papers/abe9f3b91fd26fa1b50cd685c0d20debfb372f73.html">The Pascal Visual Object Classes Challenge: A Retrospective</a></td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>999</td><td>315</td><td>684</td><td>75</td><td>698</td><td>6</td></tr><tr><td>66e6f08873325d37e0ec20a4769ce881e04e964e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/66e6f08873325d37e0ec20a4769ce881e04e964e.html">The SUN Attribute Database: Beyond Categories for Deeper Scene Understanding</a></td><td><a href="http://www.cc.gatech.edu/~hays/papers/attribute_ijcv.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>38%</td><td>112</td><td>43</td><td>69</td><td>14</td><td>83</td><td>2</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html">The intrinsic memorability of face photographs.</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Journal of experimental psychology. General</td><td></td><td></td><td></td><td></td><td>36%</td><td>47</td><td>17</td><td>30</td><td>3</td><td>33</td><td>1</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html">Three-dimensional face recognition: an eigensurface approach</a></td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. 
ICIP '04.</td><td></td><td></td><td></td><td></td><td>19%</td><td>36</td><td>7</td><td>29</td><td>4</td><td>25</td><td>1</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>16%</td><td>77</td><td>12</td><td>39</td><td>3</td><td>32</td><td>0</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>15</td><td>3</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1212.0402.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>University of Central Florida</td><td>28.59899755</td><td>-81.19712501</td><td>54%</td><td>999</td><td>535</td><td>464</td><td>73</td><td>708</td><td>212</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html">UMB-DB: A database of partially occluded 3D faces</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>29%</td><td>45</td><td>13</td><td>32</td><td>2</td><td>20</td><td>3</td></tr><tr><td>31b05f65405534a696a847dd19c621b7b8588263</td><td>umd_faces</td><td>UMD</td><td><a href="papers/31b05f65405534a696a847dd19c621b7b8588263.html">UMDFaces: An annotated face dataset for training deep networks</a></td><td><a href="http://arxiv.org/abs/1611.01484">[pdf]</a></td><td>2017 IEEE International Joint Conference on Biometrics (IJCB)</td><td></td><td></td><td></td><td></td><td>54%</td><td>35</td><td>19</td><td>16</td><td>5</td><td>28</td><td>6</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html">USED: a large-scale social event detection dataset</a></td><td><a href="http://doi.acm.org/10.1145/2910017.2910624">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>czech_news_agency</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>10%</td><td>10</td><td>1</td><td>9</td><td>0</td><td>4</td><td>2</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a 
href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>24%</td><td>89</td><td>21</td><td>46</td><td>2</td><td>20</td><td>14</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html">Understanding images of groups of people</a></td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>36%</td><td>202</td><td>72</td><td>130</td><td>12</td><td>126</td><td>24</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928', 'linkType': 'ieee'}">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td></td><td></td><td></td><td></td><td>25%</td><td>32</td><td>8</td><td>24</td><td>3</td><td>16</td><td>1</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html">VADANA: A dense dataset for facial image analysis</a></td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>19%</td><td>16</td><td>3</td><td>13</td><td>0</td><td>6</td><td>6</td></tr><tr><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/eb027969f9310e0ae941e2adee2d42cdf07d938c.html">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1710.08092.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>38%</td><td>56</td><td>21</td><td>35</td><td>6</td><td>50</td><td>3</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html">VQA: Visual Question Answering</a></td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>47%</td><td>731</td><td>344</td><td>387</td><td>47</td><td>628</td><td>4</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html">Violent flows: Real-time detection of violent crowd behavior</a></td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition 
Workshops</td><td></td><td></td><td></td><td></td><td>20%</td><td>83</td><td>17</td><td>66</td><td>6</td><td>42</td><td>2</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>18%</td><td>151</td><td>27</td><td>124</td><td>9</td><td>78</td><td>2</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html">Visual Kinship Recognition of Families in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>67%</td><td>3</td><td>2</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html">WIDER FACE: A Face Detection Benchmark</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.06523.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>53%</td><td>148</td><td>78</td><td>70</td><td>16</td><td>107</td><td>34</td></tr><tr><td>77c81c13a110a341c140995bedb98101b9e84f7f</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/77c81c13a110a341c140995bedb98101b9e84f7f.html">WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td></td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html">WLFDB: Weakly Labeled Face Databases</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>77</td><td>34</td><td>43</td><td>4</td><td>57</td><td>1</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html">Web-based database for facial expression analysis</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf">[pdf]</a></td><td>2005 IEEE International Conference on 
Multimedia and Expo</td><td></td><td></td><td></td><td></td><td>32%</td><td>440</td><td>142</td><td>298</td><td>44</td><td>258</td><td>82</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><a href="http://doi.acm.org/10.1145/2996913.2996997">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html">YawDD: a yawning detection dataset</a></td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>14</td><td>0</td><td>14</td><td>1</td><td>2</td><td>1</td></tr></table></body></html> \ No newline at end of file
+<!doctype html><html><head><meta charset='utf-8'><title>All Papers</title><link rel='stylesheet' href='reports.css'></head><body><h2>All Papers</h2><table border='1' cellpadding='3' cellspacing='3'><th>Paper ID</th><th>Megapixels Key</th><th>Megapixels Name</th><th>Report Link</th><th>PDF Link</th><th>Journal</th><th>Type</th><th>Address</th><th>Lat</th><th>Lng</th><th>Coverage</th><th>Total Citations</th><th>Geocoded Citations</th><th>Unknown Citations</th><th>Empty Citations</th><th>With PDF</th><th>With DOI</th><tr><td>fb82681ac5d3487bd8e52dbb3d1fa220eeac855e</td><td>pilot_parliament</td><td>PPB</td><td><a href="papers/fb82681ac5d3487bd8e52dbb3d1fa220eeac855e.html">1 Network Notebook</a></td><td><a href="http://pdfs.semanticscholar.org/fb82/681ac5d3487bd8e52dbb3d1fa220eeac855e.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>11</td><td>1</td><td>10</td><td>1</td><td>10</td><td>0</td></tr><tr><td>3325860c0c82a93b2eac654f5324dd6a776f609e</td><td>mpii_human_pose</td><td>MPII Human Pose</td><td><a href="papers/3325860c0c82a93b2eac654f5324dd6a776f609e.html">2D Human Pose Estimation: New Benchmark and State of the Art Analysis</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6909866', 'linkType': 'ieee'}">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>50%</td><td>356</td><td>179</td><td>177</td><td>21</td><td>299</td><td>3</td></tr><tr><td>2f5d44dc3e1b5955942133ff872ebd31716ec604</td><td>frav3d</td><td>FRAV3D</td><td><a href="papers/2f5d44dc3e1b5955942133ff872ebd31716ec604.html">2D and 3D face recognition: A survey</a></td><td><a href="http://pdfs.semanticscholar.org/2f5d/44dc3e1b5955942133ff872ebd31716ec604.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>15%</td><td>389</td><td>57</td><td>332</td><td>28</td><td>198</td><td>17</td></tr><tr><td>2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e</td><td>3dpes</td><td>3DPeS</td><td><a href="papers/2e8d0f1802e50cccfd3c0aabac0d0beab3a7846e.html">3DPeS: 3D people dataset for surveillance and forensics</a></td><td><a href="http://doi.acm.org/10.1145/2072572.2072590">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>122</td><td>22</td><td>100</td><td>11</td><td>71</td><td>1</td></tr><tr><td>9696ad8b164f5e10fcfe23aacf74bd6168aebb15</td><td>4dfab</td><td>4DFAB</td><td><a href="papers/9696ad8b164f5e10fcfe23aacf74bd6168aebb15.html">4DFAB: A Large Scale 4D Facial Expression Database for Biometric Applications</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1712.01443.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>4</td><td>0</td><td>4</td><td>0</td><td>2</td><td>0</td></tr><tr><td>d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae</td><td>b3d_ac</td><td>B3D(AC)</td><td><a href="papers/d08cc366a4a0192a01e9a7495af1eb5d9f9e73ae.html">A 3-D Audio-Visual Corpus of Affective Communication</a></td><td><a href="http://files.is.tue.mpg.de/jgall/download/jgall_avcorpus_mm10.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>31%</td><td>39</td><td>12</td><td>27</td><td>2</td><td>27</td><td>7</td></tr><tr><td>639937b3a1b8bded3f7e9a40e85bd3770016cf3c</td><td>bfm</td><td>BFM</td><td><a href="papers/639937b3a1b8bded3f7e9a40e85bd3770016cf3c.html">A 3D Face Model for Pose and Illumination Invariant Face Recognition</a></td><td><a 
href="https://pdfs.semanticscholar.org/6399/37b3a1b8bded3f7e9a40e85bd3770016cf3c.pdf">[pdf]</a></td><td>2009 Sixth IEEE International Conference on Advanced Video and Signal Based Surveillance</td><td></td><td></td><td></td><td></td><td>41%</td><td>323</td><td>131</td><td>192</td><td>29</td><td>221</td><td>25</td></tr><tr><td>c34532fe6bfbd1e6df477c9ffdbb043b77e7804d</td><td>columbia_gaze</td><td>Columbia Gaze</td><td><a href="papers/c34532fe6bfbd1e6df477c9ffdbb043b77e7804d.html">A 3D Morphable Eye Region Model for Gaze Estimation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/0d43/3b9435b874a1eea6d7999e86986c910fa285.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Carnegie Mellon University</td><td>37.41021930</td><td>-122.05965487</td><td>67%</td><td>24</td><td>16</td><td>8</td><td>0</td><td>18</td><td>4</td></tr><tr><td>cc589c499dcf323fe4a143bbef0074c3e31f9b60</td><td>bu_3dfe</td><td>BU-3DFE</td><td><a href="papers/cc589c499dcf323fe4a143bbef0074c3e31f9b60.html">A 3D facial expression database for facial behavior research</a></td><td><a href="http://www.cs.binghamton.edu/~lijun/Research/3DFE/Yin_FGR06_a.pdf">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>24%</td><td>555</td><td>131</td><td>424</td><td>47</td><td>283</td><td>48</td></tr><tr><td>22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b</td><td>saivt</td><td>SAIVT SoftBio</td><td><a href="papers/22646e00a7ba34d1b5fbe3b1efcd91a1e1be3c2b.html">A Database for Person Re-Identification in Multi-Camera Surveillance Networks</a></td><td><a href="http://eprints.qut.edu.au/53437/3/Bialkowski_Database4PersonReID_DICTA.pdf">[pdf]</a></td><td>2012 International Conference on Digital Image Computing Techniques and Applications (DICTA)</td><td></td><td></td><td></td><td></td><td>21%</td><td>58</td><td>12</td><td>46</td><td>7</td><td>40</td><td>1</td></tr><tr><td>070de852bc6eb275d7ca3a9cdde8f6be8795d1a3</td><td>d3dfacs</td><td>D3DFACS</td><td><a href="papers/070de852bc6eb275d7ca3a9cdde8f6be8795d1a3.html">A FACS valid 3D dynamic action unit database with applications to 3D dynamic morphable facial modeling</a></td><td><a href="http://www.cs.bath.ac.uk/~dpc/D3DFACS/ICCV_final_2011.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td>edu</td><td>Jacobs University</td><td>53.41291480</td><td>-2.96897915</td><td>44%</td><td>52</td><td>23</td><td>29</td><td>5</td><td>37</td><td>4</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html">A Multi - camera video data set for research on High - Definition surveillance</a></td><td><a href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>bd88bb2e4f351352d88ee7375af834360e223498</td><td>hda_plus</td><td>HDA+</td><td><a href="papers/bd88bb2e4f351352d88ee7375af834360e223498.html">A Multi - camera video data set for research on High - Definition surveillance</a></td><td><a 
href="http://pdfs.semanticscholar.org/bd88/bb2e4f351352d88ee7375af834360e223498.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>563c940054e4b456661762c1ab858e6f730c3159</td><td>data_61</td><td>Data61 Pedestrian</td><td><a href="papers/563c940054e4b456661762c1ab858e6f730c3159.html">A Multi-modal Graphical Model for Scene Analysis</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2015.139">[pdf]</a></td><td>2015 IEEE Winter Conference on Applications of Computer Vision</td><td></td><td></td><td></td><td></td><td>12%</td><td>8</td><td>1</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>221c18238b829c12b911706947ab38fd017acef7</td><td>rap_pedestrian</td><td>RAP</td><td><a href="papers/221c18238b829c12b911706947ab38fd017acef7.html">A Richly Annotated Dataset for Pedestrian Attribute Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/221c/18238b829c12b911706947ab38fd017acef7.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>52%</td><td>21</td><td>11</td><td>10</td><td>0</td><td>18</td><td>0</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr><tr><td>013909077ad843eb6df7a3e8e290cfd5575999d2</td><td>fiw_300</td><td>300-W</td><td><a href="papers/013909077ad843eb6df7a3e8e290cfd5575999d2.html">A Semi-automatic Methodology for Facial Landmark Annotation</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/sagonas_cvpr_2013_amfg_w.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops</td><td>edu</td><td>University of Twente</td><td>52.23801390</td><td>6.85667610</td><td>59%</td><td>169</td><td>100</td><td>69</td><td>14</td><td>112</td><td>49</td></tr><tr><td>ad62c6e17bc39b4dec20d32f6ac667ae42d2c118</td><td>jiku_mobile</td><td>Jiku Mobile Video Dataset</td><td><a href="papers/ad62c6e17bc39b4dec20d32f6ac667ae42d2c118.html">A Synchronization Ground Truth for the Jiku Mobile Video Dataset</a></td><td><a href="http://pdfs.semanticscholar.org/ad62/c6e17bc39b4dec20d32f6ac667ae42d2c118.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6403117f9c005ae81f1e8e6d1302f4a045e3d99d</td><td>alert_airport</td><td>ALERT Airport</td><td><a href="papers/6403117f9c005ae81f1e8e6d1302f4a045e3d99d.html">A Systematic Evaluation and 
Benchmark for Person Re-Identification: Features, Metrics, and Datasets.</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1605.09653.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE transactions on pattern analysis and machine intelligence</td><td></td><td></td><td></td><td></td><td>27%</td><td>15</td><td>4</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>7ace44190729927e5cb0dd5d363fcae966fe13f7</td><td>nudedetection</td><td>Nude Detection</td><td><a href="papers/7ace44190729927e5cb0dd5d363fcae966fe13f7.html">A bag-of-features approach based on Hue-SIFT descriptor for nude detection</a></td><td><a href="http://ieeexplore.ieee.org/document/7077625/">[pdf]</a></td><td>2009 17th European Signal Processing Conference</td><td></td><td></td><td></td><td></td><td>18%</td><td>51</td><td>9</td><td>42</td><td>1</td><td>18</td><td>0</td></tr><tr><td>0d3bb75852098b25d90f31d2f48fd0cb4944702b</td><td>face_scrub</td><td>FaceScrub</td><td><a href="papers/0d3bb75852098b25d90f31d2f48fd0cb4944702b.html">A data-driven approach to cleaning large face datasets</a></td><td><a href="https://doi.org/10.1109/ICIP.2014.7025068">[pdf]</a></td><td>2014 IEEE International Conference on Image Processing (ICIP)</td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>46%</td><td>123</td><td>56</td><td>67</td><td>6</td><td>95</td><td>21</td></tr><tr><td>b91f54e1581fbbf60392364323d00a0cd43e493c</td><td>bp4d_spontanous</td><td>BP4D-Spontanous</td><td><a href="papers/b91f54e1581fbbf60392364323d00a0cd43e493c.html">A high-resolution spontaneous 3D dynamic facial expression database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6553788', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>SUNY Binghamton</td><td>42.08779975</td><td>-75.97066066</td><td>36%</td><td>151</td><td>54</td><td>97</td><td>7</td><td>85</td><td>26</td></tr><tr><td>1ed1a49534ad8dd00f81939449f6389cfbc25321</td><td>bjut_3d</td><td>BJUT-3D</td><td><a href="papers/1ed1a49534ad8dd00f81939449f6389cfbc25321.html">A novel face recognition method based on 3D face model</a></td><td><a href="https://doi.org/10.1109/ROBIO.2007.4522202">[pdf]</a></td><td>2007 IEEE International Conference on Robotics and Biomimetics (ROBIO)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>1</td><td>1</td></tr><tr><td>57fe081950f21ca03b5b375ae3e84b399c015861</td><td>cvc_01_barcelona</td><td>CVC-01</td><td><a href="papers/57fe081950f21ca03b5b375ae3e84b399c015861.html">Adaptive Image Sampling and Windows Classification for On–board Pedestrian Detection</a></td><td><a href="http://pdfs.semanticscholar.org/57fe/081950f21ca03b5b375ae3e84b399c015861.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>9%</td><td>44</td><td>4</td><td>40</td><td>1</td><td>21</td><td>0</td></tr><tr><td>47aeb3b82f54b5ae8142b4bdda7b614433e69b9a</td><td>am_fed</td><td>AM-FED</td><td><a href="papers/47aeb3b82f54b5ae8142b4bdda7b614433e69b9a.html">Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and Spontaneous Facial Expressions Collected "In-the-Wild"</a></td><td><a 
href="http://pdfs.semanticscholar.org/5d06/437656dd94616d7d87260d5eb77513ded30f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>73</td><td>26</td><td>47</td><td>6</td><td>39</td><td>16</td></tr><tr><td>1be498d4bbc30c3bfd0029114c784bc2114d67c0</td><td>adience</td><td>Adience</td><td><a href="papers/1be498d4bbc30c3bfd0029114c784bc2114d67c0.html">Age and Gender Estimation of Unfiltered Faces</a></td><td><a href="http://www.openu.ac.il/home/hassner/Adience/EidingerEnbarHassner_tifs.pdf">[pdf]</a></td><td>IEEE Transactions on Information Forensics and Security</td><td></td><td></td><td></td><td></td><td>43%</td><td>168</td><td>72</td><td>96</td><td>7</td><td>89</td><td>53</td></tr><tr><td>6dcf418c778f528b5792104760f1fbfe90c6dd6a</td><td>agedb</td><td>AgeDB</td><td><a href="papers/6dcf418c778f528b5792104760f1fbfe90c6dd6a.html">AgeDB: The First Manually Collected, In-the-Wild Age Database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014984', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>82%</td><td>11</td><td>9</td><td>2</td><td>0</td><td>10</td><td>0</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>aflw</td><td>AFLW</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>imm_face</td><td>IMM Face Dataset</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>a74251efa970b92925b89eeef50a5e37d9281ad0</td><td>muct</td><td>MUCT</td><td><a href="papers/a74251efa970b92925b89eeef50a5e37d9281ad0.html">Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization</a></td><td><a href="http://lrs.icg.tugraz.at/pubs/koestinger_befit_11.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>53%</td><td>292</td><td>155</td><td>137</td><td>38</td><td>207</td><td>59</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>22%</td><td>88</td><td>19</td><td>43</td><td>4</td><td>42</td><td>2</td></tr><tr><td>2ce2560cf59db59ce313bbeb004e8ce55c5ce928</td><td>texas_3dfrd</td><td>Texas 3DFRD</td><td><a 
href="papers/2ce2560cf59db59ce313bbeb004e8ce55c5ce928.html">Anthropometric 3D Face Recognition</a></td><td><a href="http://live.ece.utexas.edu/publications/2010/sg_ijcv_june10.pdf">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>22%</td><td>88</td><td>19</td><td>43</td><td>4</td><td>42</td><td>2</td></tr><tr><td>c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8</td><td>face_research_lab</td><td>Face Research Lab London</td><td><a href="papers/c6526dd3060d63a6c90e8b7ff340383c4e0e0dd8.html">Anxiety promotes memory for mood-congruent faces but does not alter loss aversion.</a></td><td><a href="http://pdfs.semanticscholar.org/c652/6dd3060d63a6c90e8b7ff340383c4e0e0dd8.pdf">[pdf]</a></td><td>Scientific reports</td><td>edu</td><td>University College London</td><td>51.52316070</td><td>-0.12820370</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>0</td><td>2</td><td>2</td></tr><tr><td>0df0d1adea39a5bef318b74faa37de7f3e00b452</td><td>mpii_gaze</td><td>MPIIGaze</td><td><a href="papers/0df0d1adea39a5bef318b74faa37de7f3e00b452.html">Appearance-based gaze estimation in the wild</a></td><td><a href="https://scalable.mpi-inf.mpg.de/files/2015/09/zhang_CVPR15.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Max Planck Institute for Informatics</td><td>49.25795660</td><td>7.04577417</td><td>38%</td><td>138</td><td>52</td><td>86</td><td>3</td><td>94</td><td>7</td></tr><tr><td>5801690199c1917fa58c35c3dead177c0b8f9f2d</td><td>camel</td><td>CAMEL</td><td><a href="papers/5801690199c1917fa58c35c3dead177c0b8f9f2d.html">Application of Object Based Classification and High Resolution Satellite Imagery for Savanna Ecosystem Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/5801/690199c1917fa58c35c3dead177c0b8f9f2d.pdf">[pdf]</a></td><td>Remote Sensing</td><td></td><td></td><td></td><td></td><td>37%</td><td>19</td><td>7</td><td>12</td><td>1</td><td>16</td><td>0</td></tr><tr><td>759a3b3821d9f0e08e0b0a62c8b693230afc3f8d</td><td>pubfig</td><td>PubFig</td><td><a href="papers/759a3b3821d9f0e08e0b0a62c8b693230afc3f8d.html">Attribute and simile classifiers for face verification</a></td><td><a href="http://homes.cs.washington.edu/~neeraj/projects/faceverification/base/papers/nk_iccv2009_attrs.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>51%</td><td>894</td><td>454</td><td>440</td><td>55</td><td>587</td><td>222</td></tr><tr><td>faf40ce28857aedf183e193486f5b4b0a8c478a2</td><td>iit_dehli_ear</td><td>IIT Dehli Ear</td><td><a href="papers/faf40ce28857aedf183e193486f5b4b0a8c478a2.html">Automated Human Identification Using Ear Imaging</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/faf4/0ce28857aedf183e193486f5b4b0a8c478a2.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>13%</td><td>70</td><td>9</td><td>61</td><td>6</td><td>28</td><td>1</td></tr><tr><td>2160788824c4c29ffe213b2cbeb3f52972d73f37</td><td>3d_rma</td><td>3D-RMA</td><td><a href="papers/2160788824c4c29ffe213b2cbeb3f52972d73f37.html">Automatic 3D face authentication</a></td><td><a href="http://pdfs.semanticscholar.org/2160/788824c4c29ffe213b2cbeb3f52972d73f37.pdf">[pdf]</a></td><td>Image Vision 
Comput.</td><td></td><td></td><td></td><td></td><td>25%</td><td>95</td><td>24</td><td>71</td><td>8</td><td>60</td><td>2</td></tr><tr><td>213a579af9e4f57f071b884aa872651372b661fd</td><td>bbc_pose</td><td>BBC Pose</td><td><a href="papers/213a579af9e4f57f071b884aa872651372b661fd.html">Automatic and Efficient Human Pose Estimation for Sign Language Videos</a></td><td><a href="https://doi.org/10.1007/s11263-013-0672-6">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>1</td><td>18</td><td>1</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>miw</td><td>MIW</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>fcc6fe6007c322641796cb8792718641856a22a7</td><td>youtube_makeup</td><td>YMU</td><td><a href="papers/fcc6fe6007c322641796cb8792718641856a22a7.html">Automatic facial makeup detection with application in face recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6612994', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 International Conference on Biometrics (ICB)</td><td>edu</td><td>West Virginia University</td><td>39.65404635</td><td>-79.96475355</td><td>65%</td><td>46</td><td>30</td><td>16</td><td>1</td><td>18</td><td>21</td></tr><tr><td>0a85bdff552615643dd74646ac881862a7c7072d</td><td>pipa</td><td>PIPA</td><td><a href="papers/0a85bdff552615643dd74646ac881862a7c7072d.html">Beyond frontal faces: Improving Person Recognition using multiple cues</a></td><td><a href="https://doi.org/10.1109/CVPR.2015.7299113">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>60%</td><td>50</td><td>30</td><td>19</td><td>2</td><td>40</td><td>4</td></tr><tr><td>2acf7e58f0a526b957be2099c10aab693f795973</td><td>bosphorus</td><td>The Bosphorus</td><td><a href="papers/2acf7e58f0a526b957be2099c10aab693f795973.html">Bosphorus Database for 3D Face Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/4254/fbba3846008f50671edc9cf70b99d7304543.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>18%</td><td>328</td><td>58</td><td>270</td><td>18</td><td>143</td><td>37</td></tr><tr><td>214c966d1f9c2a4b66f4535d9a0d4078e63a5867</td><td>brainwash</td><td>Brainwash</td><td><a href="papers/214c966d1f9c2a4b66f4535d9a0d4078e63a5867.html">Brainwash: A Data System for Feature Engineering</a></td><td><a 
href="http://pdfs.semanticscholar.org/ae44/8015b2ff2bd3b8a5c9a3266f954f5af9ffa9.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>57</td><td>34</td><td>23</td><td>2</td><td>50</td><td>0</td></tr><tr><td>37d6f0eb074d207b53885bd2eb78ccc8a04be597</td><td>vmu</td><td>VMU</td><td><a href="papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html">Can facial cosmetics affect the matching accuracy of face recognition systems?</a></td><td><a href="http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf">[pdf]</a></td><td>2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td></td><td></td><td></td><td></td><td>49%</td><td>49</td><td>24</td><td>25</td><td>0</td><td>18</td><td>22</td></tr><tr><td>8d5998cd984e7cce307da7d46f155f9db99c6590</td><td>chalearn</td><td>ChaLearn</td><td><a href="papers/8d5998cd984e7cce307da7d46f155f9db99c6590.html">ChaLearn looking at people: A review of events and resources</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1701.02664.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 International Joint Conference on Neural Networks (IJCNN)</td><td></td><td></td><td></td><td></td><td>30%</td><td>10</td><td>3</td><td>7</td><td>1</td><td>6</td><td>0</td></tr><tr><td>2bf8541199728262f78d4dced6fb91479b39b738</td><td>clothing_co_parsing</td><td>CCP</td><td><a href="papers/2bf8541199728262f78d4dced6fb91479b39b738.html">Clothing Co-parsing by Joint Image Segmentation and Labeling</a></td><td><a href="https://arxiv.org/pdf/1502.00739v1.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>47%</td><td>60</td><td>28</td><td>32</td><td>0</td><td>36</td><td>6</td></tr><tr><td>6dbe8e5121c534339d6e41f8683e85f87e6abf81</td><td>gallagher</td><td>Gallagher</td><td><a href="papers/6dbe8e5121c534339d6e41f8683e85f87e6abf81.html">Clothing Cosegmentation for Shopping Images With Cluttered Background</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7423747', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>33%</td><td>6</td><td>2</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>leeds_sports_pose</td><td>Leeds Sports Pose</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>4b1d23d17476fcf78f4cbadf69fb130b1aa627c0</td><td>stickmen_buffy</td><td>Buffy Stickmen</td><td><a href="papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html">Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation</a></td><td><a 
href="http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>278</td><td>119</td><td>159</td><td>12</td><td>198</td><td>2</td></tr><tr><td>45c31cde87258414f33412b3b12fc5bec7cb3ba9</td><td>jaffe</td><td>JAFFE</td><td><a href="papers/45c31cde87258414f33412b3b12fc5bec7cb3ba9.html">Coding Facial Expressions with Gabor Wavelets</a></td><td><a href="http://pdfs.semanticscholar.org/45c3/1cde87258414f33412b3b12fc5bec7cb3ba9.pdf">[pdf]</a></td><td></td><td>edu</td><td>Kyushu University</td><td>33.59914655</td><td>130.22359848</td><td>36%</td><td>848</td><td>308</td><td>540</td><td>56</td><td>413</td><td>255</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>-35.27769990</td><td>149.11852700</td><td>33%</td><td>175</td><td>58</td><td>66</td><td>7</td><td>54</td><td>49</td></tr><tr><td>b1f4423c227fa37b9680787be38857069247a307</td><td>afew_va</td><td>AFEW-VA</td><td><a href="papers/b1f4423c227fa37b9680787be38857069247a307.html">Collecting Large, Richly Annotated Facial-Expression Databases from Movies</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6200254', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE MultiMedia</td><td>edu</td><td>Australian National University</td><td>-35.27769990</td><td>149.11852700</td><td>33%</td><td>175</td><td>58</td><td>66</td><td>7</td><td>54</td><td>49</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html">Competitive affective gaming: winning with a smile</a></td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>7f4040b482d16354d5938c1d1b926b544652bf5b</td><td>nova_emotions</td><td>Novaemötions Dataset</td><td><a href="papers/7f4040b482d16354d5938c1d1b926b544652bf5b.html">Competitive affective gaming: winning with a smile</a></td><td><a href="http://doi.acm.org/10.1145/2502081.2502115">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>079a0a3bf5200994e1f972b1b9197bf2f90e87d4</td><td>mit_cbcl</td><td>MIT CBCL</td><td><a href="papers/079a0a3bf5200994e1f972b1b9197bf2f90e87d4.html">Component-Based Face Recognition with 3D Morphable Models</a></td><td><a href="http://www.bheisele.com/avbpa2003.pdf">[pdf]</a></td><td>2004 Conference on Computer Vision and Pattern Recognition Workshop</td><td></td><td></td><td></td><td></td><td>0%</td><td>12</td><td>0</td><td>12</td><td>0</td><td>8</td><td>0</td></tr><tr><td>23fc83c8cfff14a16df7ca497661264fc54ed746</td><td>cohn_kanade</td><td>CK</td><td><a href="papers/23fc83c8cfff14a16df7ca497661264fc54ed746.html">Comprehensive Database for Facial Expression Analysis</a></td><td><a href="http://pdfs.semanticscholar.org/23fc/83c8cfff14a16df7ca497661264fc54ed746.pdf">[pdf]</a></td><td></td><td>edu</td><td>Carnegie Mellon 
University</td><td>37.41021930</td><td>-122.05965487</td><td>38%</td><td>999</td><td>380</td><td>619</td><td>75</td><td>555</td><td>252</td></tr><tr><td>09d78009687bec46e70efcf39d4612822e61cb8c</td><td>raid</td><td>RAiD</td><td><a href="papers/09d78009687bec46e70efcf39d4612822e61cb8c.html">Consistent Re-identification in a Camera Network</a></td><td><a href="http://pdfs.semanticscholar.org/c27f/099e6e7e3f7f9979cbe9e0a5175fc5848ea0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>45</td><td>12</td><td>33</td><td>7</td><td>34</td><td>1</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>casablanca</td><td>Casablanca</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>0ceda9dae8b9f322df65ca2ef02caca9758aec6f</td><td>hollywood_headset</td><td>HollywoodHeads</td><td><a href="papers/0ceda9dae8b9f322df65ca2ef02caca9758aec6f.html">Context-Aware CNNs for Person Head Detection</a></td><td><a href="http://arxiv.org/pdf/1511.07917v1.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>33%</td><td>27</td><td>9</td><td>18</td><td>1</td><td>22</td><td>0</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390', 'linkType': 'ieee'}">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td>44%</td><td>102</td><td>45</td><td>54</td><td>6</td><td>62</td><td>28</td></tr><tr><td>8355d095d3534ef511a9af68a3b2893339e3f96b</td><td>imdb_wiki</td><td>IMDB</td><td><a href="papers/8355d095d3534ef511a9af68a3b2893339e3f96b.html">DEX: Deep EXpectation of Apparent Age from a Single Image</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7406390', 'linkType': 'ieee'}">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision Workshop (ICCVW)</td><td></td><td></td><td></td><td></td><td>44%</td><td>102</td><td>45</td><td>54</td><td>6</td><td>62</td><td>28</td></tr><tr><td>162ea969d1929ed180cc6de9f0bf116993ff6e06</td><td>vgg_faces</td><td>VGG Face</td><td><a href="papers/162ea969d1929ed180cc6de9f0bf116993ff6e06.html">Deep Face Recognition</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/f372/ab9b3270d4e4f6a0258c83c2736c3a5c0454.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>621</td><td>156</td></tr><tr><td>6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4</td><td>celeba</td><td>CelebA</td><td><a href="papers/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4.html">Deep Learning Face Attributes in the Wild</a></td><td><a href="http://arxiv.org/pdf/1411.7766v2.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong 
Kong</td><td>22.42031295</td><td>114.20788644</td><td>42%</td><td>808</td><td>340</td><td>468</td><td>69</td><td>666</td><td>50</td></tr><tr><td>6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3</td><td>cuhk03</td><td>CUHK03</td><td><a href="papers/6bd36e9fd0ef20a3074e1430a6cc601e6d407fc3.html">DeepReID: Deep Filter Pairing Neural Network for Person Re-identification</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Li_DeepReID_Deep_Filter_2014_CVPR_paper.pdf">[pdf]</a></td><td>2014 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>35%</td><td>512</td><td>180</td><td>332</td><td>29</td><td>323</td><td>4</td></tr><tr><td>13f06b08f371ba8b5d31c3e288b4deb61335b462</td><td>eth_andreas_ess</td><td>ETHZ Pedestrian</td><td><a href="papers/13f06b08f371ba8b5d31c3e288b4deb61335b462.html">Depth and Appearance for Mobile Scene Analysis</a></td><td><a href="http://www.mmp.rwth-aachen.de/publications/pdf/ess-depthandappearance-iccv07.pdf/at_download/file">[pdf]</a></td><td>2007 IEEE 11th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>25%</td><td>319</td><td>79</td><td>240</td><td>27</td><td>192</td><td>0</td></tr><tr><td>2e384f057211426ac5922f1b33d2aa8df5d51f57</td><td>a_pascal_yahoo</td><td>aPascal</td><td><a href="papers/2e384f057211426ac5922f1b33d2aa8df5d51f57.html">Describing objects by their attributes</a></td><td><a href="http://www-2.cs.cmu.edu/~dhoiem/publications/cvpr2009_attributes.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>39%</td><td>999</td><td>393</td><td>606</td><td>71</td><td>727</td><td>73</td></tr><tr><td>7808937b46acad36e43c30ae4e9f3fd57462853d</td><td>berkeley_pose</td><td>BPAD</td><td><a href="papers/7808937b46acad36e43c30ae4e9f3fd57462853d.html">Describing people: A poselet-based approach to attribute classification</a></td><td><a href="http://ttic.uchicago.edu/~smaji/papers/attributes-iccv11.pdf">[pdf]</a></td><td>2011 International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>43%</td><td>221</td><td>96</td><td>125</td><td>14</td><td>160</td><td>23</td></tr><tr><td>56ae6d94fc6097ec4ca861f0daa87941d1c10b70</td><td>cmdp</td><td>CMDP</td><td><a href="papers/56ae6d94fc6097ec4ca861f0daa87941d1c10b70.html">Distance Estimation of an Unknown Person from a Portrait</a></td><td><a href="http://pdfs.semanticscholar.org/56ae/6d94fc6097ec4ca861f0daa87941d1c10b70.pdf">[pdf]</a></td><td></td><td>edu</td><td>California Institute of Technology</td><td>34.13710185</td><td>-118.12527487</td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>6</td><td>1</td></tr><tr><td>84fe5b4ac805af63206012d29523a1e033bc827e</td><td>awe_ears</td><td>AWE Ears</td><td><a href="papers/84fe5b4ac805af63206012d29523a1e033bc827e.html">Ear recognition: More than a survey</a></td><td><a href="http://pdfs.semanticscholar.org/84fe/5b4ac805af63206012d29523a1e033bc827e.pdf">[pdf]</a></td><td>Neurocomputing</td><td></td><td></td><td></td><td></td><td>29%</td><td>24</td><td>7</td><td>17</td><td>0</td><td>11</td><td>0</td></tr><tr><td>133f01aec1534604d184d56de866a4bd531dac87</td><td>lfw_a</td><td>LFW-a</td><td><a href="papers/133f01aec1534604d184d56de866a4bd531dac87.html">Effective Unconstrained Face Recognition by Combining Multiple Descriptors and Learned Background Statistics</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.230">[pdf]</a></td><td>IEEE Transactions 
on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>42%</td><td>177</td><td>75</td><td>102</td><td>15</td><td>102</td><td>54</td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td>umd_faces</td><td>UMD</td><td><a href="papers/447d8893a4bdc29fa1214e53499ffe67b28a6db5.html">Electronic Transport in Quantum Confined Systems</a></td><td><a href="http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>447d8893a4bdc29fa1214e53499ffe67b28a6db5</td><td>umd_faces</td><td>UMD</td><td><a href="papers/447d8893a4bdc29fa1214e53499ffe67b28a6db5.html">Electronic Transport in Quantum Confined Systems</a></td><td><a href="http://pdfs.semanticscholar.org/447d/8893a4bdc29fa1214e53499ffe67b28a6db5.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>c900e0ad4c95948baaf0acd8449fde26f9b4952a</td><td>emotio_net</td><td>EmotioNet Database</td><td><a href="papers/c900e0ad4c95948baaf0acd8449fde26f9b4952a.html">EmotioNet: An Accurate, Real-Time Algorithm for the Automatic Annotation of a Million Facial Expressions in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7780969', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>72</td><td>32</td><td>40</td><td>7</td><td>54</td><td>8</td></tr><tr><td>2161f6b7ee3c0acc81603b01dc0df689683577b9</td><td>large_scale_person_search</td><td>Large Scale Person Search</td><td><a href="papers/2161f6b7ee3c0acc81603b01dc0df689683577b9.html">End-to-End Deep Learning for Person Search</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/2161/f6b7ee3c0acc81603b01dc0df689683577b9.pdf', 'linkType': 's2'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>34%</td><td>41</td><td>14</td><td>27</td><td>2</td><td>27</td><td>0</td></tr><tr><td>6273b3491e94ea4dd1ce42b791d77bdc96ee73a8</td><td>viper</td><td>VIPeR</td><td><a href="papers/6273b3491e94ea4dd1ce42b791d77bdc96ee73a8.html">Evaluating Appearance Models for Recognition, Reacquisition, and Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/6273/b3491e94ea4dd1ce42b791d77bdc96ee73a8.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>584</td><td>159</td><td>425</td><td>38</td><td>336</td><td>9</td></tr><tr><td>9e5378e7b336c89735d3bb15cf67eff96f86d39a</td><td>precarious</td><td>Precarious</td><td><a href="papers/9e5378e7b336c89735d3bb15cf67eff96f86d39a.html">Expecting the Unexpected: Training Detectors for Unusual Pedestrians with Adversarial Imposters</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1703.06283.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>8%</td><td>12</td><td>1</td><td>11</td><td>1</td><td>10</td><td>0</td></tr><tr><td>35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62</td><td>coco_qa</td><td>COCO QA</td><td><a href="papers/35b0331dfcd2897abd5749b49ff5e2b8ba0f7a62.html">Exploring Models and Data for Image Question Answering</a></td><td><a 
href="http://pdfs.semanticscholar.org/aa79/9c29c0d44ece1864467af520fe70540c069b.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>191</td><td>83</td><td>108</td><td>12</td><td>163</td><td>1</td></tr><tr><td>42505464808dfb446f521fc6ff2cfeffd4d68ff1</td><td>gavab_db</td><td>Gavab</td><td><a href="papers/42505464808dfb446f521fc6ff2cfeffd4d68ff1.html">Expression invariant 3D face recognition with a Morphable Model</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4813376', 'linkType': 'ieee'}">[pdf]</a></td><td>2008 8th IEEE International Conference on Automatic Face & Gesture Recognition</td><td></td><td></td><td></td><td></td><td>29%</td><td>94</td><td>27</td><td>67</td><td>10</td><td>57</td><td>5</td></tr><tr><td>a5acda0e8c0937bfed013e6382da127103e41395</td><td>disfa</td><td>DISFA</td><td><a href="papers/a5acda0e8c0937bfed013e6382da127103e41395.html">Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7789672', 'linkType': 'ieee'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>38%</td><td>8</td><td>3</td><td>5</td><td>1</td><td>5</td><td>0</td></tr><tr><td>75da1df4ed319926c544eefe17ec8d720feef8c0</td><td>fddb</td><td>FDDB</td><td><a href="papers/75da1df4ed319926c544eefe17ec8d720feef8c0.html">FDDB: A Benchmark for Face Detection in Unconstrained Settings</a></td><td><a href="http://pdfs.semanticscholar.org/75da/1df4ed319926c544eefe17ec8d720feef8c0.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>3607afdb204de9a5a9300ae98aa4635d9effcda2</td><td>sheffield</td><td>Sheffield Face</td><td><a href="papers/3607afdb204de9a5a9300ae98aa4635d9effcda2.html">Face Description with Local Binary Patterns: Application to Face Recognition</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/TPAMI.2006.244">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>24%</td><td>999</td><td>238</td><td>761</td><td>65</td><td>483</td><td>87</td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/28312c3a47c1be3a67365700744d3d6665b86f22.html">Face Recognition: A Literature Survey1</a></td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>105</td><td>265</td><td>32</td><td>217</td><td>39</td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/28312c3a47c1be3a67365700744d3d6665b86f22.html">Face Recognition: A Literature Survey1</a></td><td><a href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>105</td><td>265</td><td>32</td><td>217</td><td>39</td></tr><tr><td>28312c3a47c1be3a67365700744d3d6665b86f22</td><td>hrt_transgender</td><td>HRT Transgender</td><td><a href="papers/28312c3a47c1be3a67365700744d3d6665b86f22.html">Face Recognition: A Literature Survey1</a></td><td><a 
href="http://pdfs.semanticscholar.org/2831/2c3a47c1be3a67365700744d3d6665b86f22.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>105</td><td>265</td><td>32</td><td>217</td><td>39</td></tr><tr><td>0e986f51fe45b00633de9fd0c94d082d2be51406</td><td>afw</td><td>AFW</td><td><a href="papers/0e986f51fe45b00633de9fd0c94d082d2be51406.html">Face detection, pose estimation, and landmark localization in the wild</a></td><td><a href="http://vision.ics.uci.edu/papers/ZhuR_CVPR_2012/ZhuR_CVPR_2012.pdf">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td>edu</td><td>University of California, Irvine</td><td>33.64319010</td><td>-117.84016494</td><td>52%</td><td>999</td><td>521</td><td>478</td><td>59</td><td>607</td><td>273</td></tr><tr><td>560e0e58d0059259ddf86fcec1fa7975dee6a868</td><td>youtube_faces</td><td>YouTubeFaces</td><td><a href="papers/560e0e58d0059259ddf86fcec1fa7975dee6a868.html">Face recognition in unconstrained videos with matched background similarity</a></td><td><a href="http://www.cs.tau.ac.il/~wolf/papers/lvfw.pdf">[pdf]</a></td><td>CVPR 2011</td><td>edu</td><td>Open University of Israel</td><td>32.77824165</td><td>34.99565673</td><td>50%</td><td>485</td><td>244</td><td>240</td><td>32</td><td>290</td><td>140</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html">Face swapping: automatically replacing faces in photographs</a></td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td>ACM Trans. Graph.</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>670637d0303a863c1548d5b19f705860a23e285c</td><td>face_tracer</td><td>FaceTracer</td><td><a href="papers/670637d0303a863c1548d5b19f705860a23e285c.html">Face swapping: automatically replacing faces in photographs</a></td><td><a href="https://classes.cs.uoregon.edu/16F/cis607photo/faces.pdf">[pdf]</a></td><td>ACM Trans. 
Graph.</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>100%</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1</td><td>1</td></tr><tr><td>7ebb153704706e457ab57b432793d2b6e5d12592</td><td>vgg_celebs_in_places</td><td>CIP</td><td><a href="papers/7ebb153704706e457ab57b432793d2b6e5d12592.html">Faces in Places: compound query retrieval</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/7ebb/153704706e457ab57b432793d2b6e5d12592.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>80%</td><td>5</td><td>4</td><td>1</td><td>0</td><td>4</td><td>0</td></tr><tr><td>014b8df0180f33b9fea98f34ae611c6447d761d2</td><td>buhmap_db</td><td>BUHMAP-DB </td><td><a href="papers/014b8df0180f33b9fea98f34ae611c6447d761d2.html">Facial feature tracking and expression recognition for sign language</a></td><td><a href="http://www.cmpe.boun.edu.tr/pilab/pilabfiles/databases/buhmap/files/ari2008facialfeaturetracking.pdf">[pdf]</a></td><td>2008 23rd International Symposium on Computer and Information Sciences</td><td></td><td></td><td></td><td></td><td>16%</td><td>25</td><td>4</td><td>21</td><td>1</td><td>10</td><td>2</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html">Fashion Landmark Detection in the Wild</a></td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>39%</td><td>23</td><td>9</td><td>8</td><td>0</td><td>15</td><td>0</td></tr><tr><td>4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7</td><td>deep_fashion</td><td>DeepFashion</td><td><a href="papers/4fefd1bc8dc4e0ab37ee3324ddfa43ad9d6a04a7.html">Fashion Landmark Detection in the Wild</a></td><td><a href="http://pdfs.semanticscholar.org/d8ca/e259c1c5bba0c096f480dc7322bbaebfac1a.pdf">[pdf]</a></td><td></td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>39%</td><td>23</td><td>9</td><td>8</td><td>0</td><td>15</td><td>0</td></tr><tr><td>45e616093a92e5f1e61a7c6037d5f637aa8964af</td><td>malf</td><td>MALF</td><td><a href="papers/45e616093a92e5f1e61a7c6037d5f637aa8964af.html">Fine-grained evaluation on face detection in the wild</a></td><td><a href="http://www.cs.toronto.edu/~byang/papers/malf_fg15.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Chinese Academy of Sciences</td><td>40.00447950</td><td>116.37023800</td><td>71%</td><td>17</td><td>12</td><td>5</td><td>0</td><td>13</td><td>4</td></tr><tr><td>1aad2da473888cb7ebc1bfaa15bfa0f1502ce005</td><td>jpl_pose</td><td>JPL-Interaction dataset</td><td><a href="papers/1aad2da473888cb7ebc1bfaa15bfa0f1502ce005.html">First-Person Activity Recognition: What Are They Doing to Me?</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Ryoo_First-Person_Activity_Recognition_2013_CVPR_paper.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>51%</td><td>148</td><td>76</td><td>72</td><td>8</td><td>109</td><td>3</td></tr><tr><td>774cbb45968607a027ae4729077734db000a1ec5</td><td>urban_tribes</td><td>Urban Tribes</td><td><a 
href="papers/774cbb45968607a027ae4729077734db000a1ec5.html">From Bikers to Surfers: Visual Recognition of Urban Tribes</a></td><td><a href="http://pdfs.semanticscholar.org/774c/bb45968607a027ae4729077734db000a1ec5.pdf">[pdf]</a></td><td></td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>47%</td><td>17</td><td>8</td><td>9</td><td>1</td><td>12</td><td>1</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>22f656d0f8426c84a33a267977f511f127bfd7f3</td><td>expw</td><td>ExpW</td><td><a href="papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html">From Facial Expression Recognition to Interpersonal Relation Prediction</a></td><td><a href="http://arxiv.org/abs/1609.06426">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>44%</td><td>9</td><td>4</td><td>5</td><td>0</td><td>5</td><td>1</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>42%</td><td>999</td><td>422</td><td>577</td><td>80</td><td>538</td><td>295</td></tr><tr><td>18c72175ddbb7d5956d180b65a96005c100f6014</td><td>yale_faces</td><td>YaleFaces</td><td><a href="papers/18c72175ddbb7d5956d180b65a96005c100f6014.html">From Few to Many: Illumination Cone Models for Face Recognition under Variable Lighting and Pose</a></td><td><a href="http://pdfs.semanticscholar.org/97bb/c2b439a79d4dc0dc7199d71ed96ad5e3fd0e.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. 
Intell.</td><td></td><td></td><td></td><td></td><td>42%</td><td>999</td><td>422</td><td>577</td><td>80</td><td>538</td><td>295</td></tr><tr><td>93884e46c49f7ae1c7c34046fbc28882f2bd6341</td><td>kdef</td><td>KDEF</td><td><a href="papers/93884e46c49f7ae1c7c34046fbc28882f2bd6341.html">Gaze fixation and the neural circuitry of face processing in autism</a></td><td><a href="{'url': 'http://doi.org/10.1038/nn1421', 'linkType': 'nature'}">[pdf]</a></td><td>Nature Neuroscience</td><td></td><td></td><td></td><td></td><td>31%</td><td>608</td><td>190</td><td>418</td><td>92</td><td>463</td><td>0</td></tr><tr><td>b6b1b0632eb9d4ab1427278f5e5c46f97753c73d</td><td>fei</td><td>FEI</td><td><a href="papers/b6b1b0632eb9d4ab1427278f5e5c46f97753c73d.html">Generalização cartográfica automatizada para um banco de dados cadastral</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/b6b1/b0632eb9d4ab1427278f5e5c46f97753c73d.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>17b46e2dad927836c689d6787ddb3387c6159ece</td><td>geofaces</td><td>GeoFaces</td><td><a href="papers/17b46e2dad927836c689d6787ddb3387c6159ece.html">GeoFaceExplorer: exploring the geo-dependence of facial attributes</a></td><td><a href="http://doi.acm.org/10.1145/2676440.2676443">[pdf]</a></td><td></td><td>edu</td><td>University of Kentucky</td><td>38.03337420</td><td>-84.50177580</td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>3cd40bfa1ff193a96bde0207e5140a399476466c</td><td>tvhi</td><td>TVHI</td><td><a href="papers/3cd40bfa1ff193a96bde0207e5140a399476466c.html">High Five: Recognising human interactions in TV shows</a></td><td><a href="http://pdfs.semanticscholar.org/3cd4/0bfa1ff193a96bde0207e5140a399476466c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>91</td><td>31</td><td>60</td><td>11</td><td>64</td><td>1</td></tr><tr><td>24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd</td><td>hi4d_adsip</td><td>Hi4D-ADSIP</td><td><a href="papers/24830e3979d4ed01b9fd0feebf4a8fd22e0c35fd.html">High-resolution comprehensive 3-D dynamic database for facial articulation analysis</a></td><td><a href="http://www.researchgate.net/profile/Wei_Quan3/publication/221430048_High-resolution_comprehensive_3-D_dynamic_database_for_facial_articulation_analysis/links/0deec534309495805d000000.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>5</td><td>0</td><td>5</td><td>0</td><td>1</td><td>0</td></tr><tr><td>04c2cda00e5536f4b1508cbd80041e9552880e67</td><td>hipsterwars</td><td>Hipsterwars</td><td><a href="papers/04c2cda00e5536f4b1508cbd80041e9552880e67.html">Hipster Wars: Discovering Elements of Fashion Styles</a></td><td><a href="http://pdfs.semanticscholar.org/04c2/cda00e5536f4b1508cbd80041e9552880e67.pdf">[pdf]</a></td><td></td><td>edu</td><td>Tohoku University</td><td>38.25309450</td><td>140.87365930</td><td>53%</td><td>91</td><td>48</td><td>43</td><td>5</td><td>60</td><td>15</td></tr><tr><td>10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5</td><td>inria_person</td><td>INRIA Pedestrian</td><td><a href="papers/10d6b12fa07c7c8d6c8c3f42c7f1c061c131d4c5.html">Histograms of oriented gradients for human detection</a></td><td><a href="http://nichol.as/papers/Dalai/Histograms%20of%20oriented%20gradients%20for%20human%20detection.pdf">[pdf]</a></td><td>2005 IEEE Computer Society 
Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>22%</td><td>999</td><td>217</td><td>782</td><td>67</td><td>520</td><td>22</td></tr><tr><td>041d3eedf5e45ce5c5229f0181c5c576ed1fafd6</td><td>ucf_selfie</td><td>UCF Selfie</td><td><a href="papers/041d3eedf5e45ce5c5229f0181c5c576ed1fafd6.html">How to Take a Good Selfie?</a></td><td><a href="http://doi.acm.org/10.1145/2733373.2806365">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>22%</td><td>9</td><td>2</td><td>7</td><td>0</td><td>5</td><td>0</td></tr><tr><td>44d23df380af207f5ac5b41459c722c87283e1eb</td><td>wider_attribute</td><td>WIDER Attribute</td><td><a href="papers/44d23df380af207f5ac5b41459c722c87283e1eb.html">Human Attribute Recognition by Deep Hierarchical Contexts</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8e28/07f2dd53b03a759e372e07f7191cae65c9fd.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>67%</td><td>18</td><td>12</td><td>6</td><td>0</td><td>16</td><td>0</td></tr><tr><td>44484d2866f222bbb9b6b0870890f9eea1ffb2d0</td><td>cuhk01</td><td>CUHK01</td><td><a href="papers/44484d2866f222bbb9b6b0870890f9eea1ffb2d0.html">Human Reidentification with Transferred Metric Learning</a></td><td><a href="http://pdfs.semanticscholar.org/4448/4d2866f222bbb9b6b0870890f9eea1ffb2d0.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>258</td><td>67</td><td>191</td><td>12</td><td>141</td><td>1</td></tr><tr><td>57178b36c21fd7f4529ac6748614bb3374714e91</td><td>ijb_c</td><td>IJB-C</td><td><a href="papers/57178b36c21fd7f4529ac6748614bb3374714e91.html">IARPA Janus Benchmark - C: Face Dataset and Protocol</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8411217', 'linkType': 'ieee'}">[pdf]</a></td><td>2018 International Conference on Biometrics (ICB)</td><td></td><td></td><td></td><td></td><td>33%</td><td>9</td><td>3</td><td>6</td><td>2</td><td>9</td><td>0</td></tr><tr><td>0cb2dd5f178e3a297a0c33068961018659d0f443</td><td>ijb_b</td><td>IJB-B</td><td><a href="papers/0cb2dd5f178e3a297a0c33068961018659d0f443.html">IARPA Janus Benchmark-B Face Dataset</a></td><td><a href="http://www.vislab.ucr.edu/Biometrics2017/program_slides/Noblis_CVPRW_IJBB.pdf">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)</td><td></td><td></td><td></td><td></td><td>24%</td><td>25</td><td>6</td><td>19</td><td>6</td><td>21</td><td>3</td></tr><tr><td>0297448f3ed948e136bb06ceff10eccb34e5bb77</td><td>ilids_mcts</td><td></td><td><a href="papers/0297448f3ed948e136bb06ceff10eccb34e5bb77.html">Imagery Library for Intelligent Detection Systems (i-LIDS); A Standard for Testing Video Based Detection Systems</a></td><td><span class="gray">[pdf]</a></td><td>Proceedings 40th Annual 2006 International Carnahan Conference on Security Technology</td><td></td><td></td><td></td><td></td><td>22%</td><td>32</td><td>7</td><td>25</td><td>2</td><td>17</td><td>0</td></tr><tr><td>55c40cbcf49a0225e72d911d762c27bb1c2d14aa</td><td>ifad</td><td>IFAD</td><td><a href="papers/55c40cbcf49a0225e72d911d762c27bb1c2d14aa.html">Indian Face Age Database : A Database for Face Recognition with Age Variation</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/55c4/0cbcf49a0225e72d911d762c27bb1c2d14aa.pdf', 'linkType': 
's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>50%</td><td>2</td><td>1</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>ca3e88d87e1344d076c964ea89d91a75c417f5ee</td><td>imfdb</td><td>IMFDB</td><td><a href="papers/ca3e88d87e1344d076c964ea89d91a75c417f5ee.html">Indian Movie Face Database: A benchmark for face recognition under wide variations</a></td><td><span class="gray">[pdf]</a></td><td>2013 Fourth National Conference on Computer Vision, Pattern Recognition, Image Processing and Graphics (NCVPRIPG)</td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>10</td><td>4</td></tr><tr><td>95f12d27c3b4914e0668a268360948bce92f7db3</td><td>helen</td><td>Helen</td><td><a href="papers/95f12d27c3b4914e0668a268360948bce92f7db3.html">Interactive Facial Feature Localization</a></td><td><a href="http://pdfs.semanticscholar.org/95f1/2d27c3b4914e0668a268360948bce92f7db3.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Illinois, Urbana-Champaign</td><td>40.11116745</td><td>-88.22587665</td><td>52%</td><td>339</td><td>177</td><td>162</td><td>27</td><td>208</td><td>100</td></tr><tr><td>4d423acc78273b75134e2afd1777ba6d3a398973</td><td>cmu_pie</td><td>CMU PIE</td><td><a href="papers/4d423acc78273b75134e2afd1777ba6d3a398973.html">International Conference on Automatic Face and Gesture Recognition The CMU Pose , Illumination , and Expression ( PIE ) Database</a></td><td><a href="http://pdfs.semanticscholar.org/4d42/3acc78273b75134e2afd1777ba6d3a398973.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>742</td><td>330</td><td>412</td><td>61</td><td>410</td><td>232</td></tr><tr><td>ad01687649d95cd5b56d7399a9603c4b8e2217d7</td><td>mrp_drone</td><td>MRP Drone</td><td><a href="papers/ad01687649d95cd5b56d7399a9603c4b8e2217d7.html">Investigating Open-World Person Re-identi cation Using a Drone</a></td><td><a href="http://pdfs.semanticscholar.org/ad01/687649d95cd5b56d7399a9603c4b8e2217d7.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>5</td><td>2</td><td>3</td><td>0</td><td>3</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>066d71fcd997033dce4ca58df924397dfe0b5fd1</td><td>ifdb</td><td>IFDB</td><td><a href="papers/066d71fcd997033dce4ca58df924397dfe0b5fd1.html">Iranian Face Database and Evaluation with a New Detection Algorithm</a></td><td><a href="http://pdfs.semanticscholar.org/066d/71fcd997033dce4ca58df924397dfe0b5fd1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>0b440695c822a8e35184fb2f60dcdaa8a6de84ae</td><td>kinectface</td><td>KinectFaceDB</td><td><a href="papers/0b440695c822a8e35184fb2f60dcdaa8a6de84ae.html">KinectFaceDB: A Kinect Database for Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6866883', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics: 
Systems</td><td></td><td></td><td></td><td></td><td>16%</td><td>75</td><td>12</td><td>63</td><td>6</td><td>25</td><td>8</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>41%</td><td>116</td><td>47</td><td>46</td><td>4</td><td>62</td><td>19</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>41%</td><td>116</td><td>47</td><td>46</td><td>4</td><td>62</td><td>19</td></tr><tr><td>2d3482dcff69c7417c7b933f22de606a0e8e42d4</td><td>lfw</td><td>LFW</td><td><a href="papers/2d3482dcff69c7417c7b933f22de606a0e8e42d4.html">Labeled Faces in the Wild : Updates and New Reporting Procedures</a></td><td><a href="http://pdfs.semanticscholar.org/2d34/82dcff69c7417c7b933f22de606a0e8e42d4.pdf">[pdf]</a></td><td></td><td>edu</td><td>University of Massachusetts</td><td>42.38897850</td><td>-72.52869870</td><td>41%</td><td>116</td><td>47</td><td>46</td><td>4</td><td>62</td><td>19</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>3dddb_unconstrained</td><td>3D Dynamic</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>ar_facedb</td><td>AR Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>m2vtsdb_extended</td><td>xm2vtsdb</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>370b5757a5379b15e30d619e4d3fb9e8e13f3256</td><td>put_face</td><td>Put Face</td><td><a href="papers/370b5757a5379b15e30d619e4d3fb9e8e13f3256.html">Labeled Faces in the Wild: A Database for Studying Face Recognition in Unconstrained Environments</a></td><td><a 
href="http://pdfs.semanticscholar.org/c6b3/ca4f939e36a9679a70e14ce8b1bbbc5618f3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>47%</td><td>999</td><td>472</td><td>526</td><td>71</td><td>619</td><td>260</td></tr><tr><td>0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e</td><td>lag</td><td>LAG</td><td><a href="papers/0d2dd4fc016cb6a517d8fb43a7cc3ff62964832e.html">Large Age-Gap face verification by feature injection in deep networks</a></td><td><a href="http://pdfs.semanticscholar.org/0d2d/d4fc016cb6a517d8fb43a7cc3ff62964832e.pdf">[pdf]</a></td><td>Pattern Recognition Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>2</td></tr><tr><td>f3b84a03985de3890b400b68e2a92c0a00afd9d0</td><td>scface</td><td>SCface</td><td><a href="papers/f3b84a03985de3890b400b68e2a92c0a00afd9d0.html">Large Variability Surveillance Camera Face Database</a></td><td><span class="gray">[pdf]</a></td><td>2015 Seventh International Conference on Computational Intelligence, Modelling and Simulation (CIMSim)</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1</td><td>uccs</td><td>UCCS</td><td><a href="papers/07fcbae86f7a3ad3ea1cf95178459ee9eaf77cb1.html">Large scale unconstrained open set face database</a></td><td><a href="http://www.vast.uccs.edu/~tboult/PAPERS/BTAS13-Sapkota-Boult-UCCSFaceDB.pdf">[pdf]</a></td><td>2013 IEEE Sixth International Conference on Biometrics: Theory, Applications and Systems (BTAS)</td><td>edu</td><td>University of Colorado at Colorado Springs</td><td>38.89646790</td><td>-104.80505940</td><td>60%</td><td>5</td><td>3</td><td>2</td><td>0</td><td>3</td><td>0</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mafl</td><td>MAFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>a0fd85b3400c7b3e11122f44dc5870ae2de9009a</td><td>mtfl</td><td>MTFL</td><td><a href="papers/a0fd85b3400c7b3e11122f44dc5870ae2de9009a.html">Learning Deep Representation for Face Alignment with Auxiliary Attributes</a></td><td><a 
href="{'url': 'https://arxiv.org/pdf/1408.3967.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>33%</td><td>105</td><td>35</td><td>50</td><td>8</td><td>55</td><td>16</td></tr><tr><td>69a68f9cf874c69e2232f47808016c2736b90c35</td><td>celeba_plus</td><td>CelebFaces+</td><td><a href="papers/69a68f9cf874c69e2232f47808016c2736b90c35.html">Learning Deep Representation for Imbalanced Classification</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~ccloy/files/cvpr_2016_imbalanced.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>33%</td><td>51</td><td>17</td><td>34</td><td>1</td><td>39</td><td>2</td></tr><tr><td>853bd61bc48a431b9b1c7cab10c603830c488e39</td><td>casia_webface</td><td>CASIA Webface</td><td><a href="papers/853bd61bc48a431b9b1c7cab10c603830c488e39.html">Learning Face Representation from Scratch</a></td><td><a href="http://pdfs.semanticscholar.org/b8a2/0ed7e74325da76d7183d1ab77b082a92b447.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>53%</td><td>436</td><td>233</td><td>203</td><td>32</td><td>284</td><td>115</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>2a171f8d14b6b8735001a11c217af9587d095848</td><td>social_relation</td><td>Social Relation</td><td><a href="papers/2a171f8d14b6b8735001a11c217af9587d095848.html">Learning Social Relation Traits from Face Images</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.414">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>30%</td><td>20</td><td>6</td><td>14</td><td>5</td><td>15</td><td>0</td></tr><tr><td>4e4746094bf60ee83e40d8597a6191e463b57f76</td><td>leeds_sports_pose_extended</td><td>Leeds Sports Pose Extended</td><td><a href="papers/4e4746094bf60ee83e40d8597a6191e463b57f76.html">Learning effective human pose estimation from inaccurate annotation</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995318', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>40%</td><td>173</td><td>70</td><td>103</td><td>9</td><td>116</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>6dd0597f8513dc100cd0bc1b493768cde45098a9</td><td>stickmen_pascal</td><td>Stickmen PASCAL</td><td><a 
href="papers/6dd0597f8513dc100cd0bc1b493768cde45098a9.html">Learning to parse images of articulated bodies</a></td><td><a href="http://pdfs.semanticscholar.org/9cd7/4c43dbf9be0b9caae4606ee53e6d45850471.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>373</td><td>117</td><td>256</td><td>30</td><td>238</td><td>2</td></tr><tr><td>c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709</td><td>stanford_drone</td><td>Stanford Drone</td><td><a href="papers/c9bda86e23cab9e4f15ea0c4cbb6cc02b9dfb709.html">Learning to predict human behaviour in crowded scenes</a></td><td><a href="http://pdfs.semanticscholar.org/c9bd/a86e23cab9e4f15ea0c4cbb6cc02b9dfb709.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>20%</td><td>5</td><td>1</td><td>4</td><td>1</td><td>5</td><td>0</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1705.00393.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>41%</td><td>27</td><td>11</td><td>16</td><td>2</td><td>22</td><td>4</td></tr><tr><td>28d4e027c7e90b51b7d8908fce68128d1964668a</td><td>megaface</td><td>MegaFace</td><td><a href="papers/28d4e027c7e90b51b7d8908fce68128d1964668a.html">Level Playing Field for Million Scale Face Recognition</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1705.00393.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>41%</td><td>27</td><td>11</td><td>16</td><td>2</td><td>22</td><td>4</td></tr><tr><td>140438a77a771a8fb656b39a78ff488066eb6b50</td><td>lfw_p</td><td>LFWP</td><td><a href="papers/140438a77a771a8fb656b39a78ff488066eb6b50.html">Localizing Parts of Faces Using a Consensus of Exemplars</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2011.5995602">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td>edu</td><td>Columbia University</td><td>40.84198360</td><td>-73.94368971</td><td>53%</td><td>521</td><td>274</td><td>247</td><td>40</td><td>321</td><td>144</td></tr><tr><td>38b55d95189c5e69cf4ab45098a48fba407609b4</td><td>cuhk02</td><td>CUHK02</td><td><a href="papers/38b55d95189c5e69cf4ab45098a48fba407609b4.html">Locally Aligned Feature Transforms across Views</a></td><td><a href="http://vigir.missouri.edu/~gdesouza/Research/Conference_CDs/IEEE_CVPR2013/data/Papers/4989d594.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>24%</td><td>242</td><td>57</td><td>185</td><td>17</td><td>139</td><td>1</td></tr><tr><td>c0387e788a52f10bf35d4d50659cfa515d89fbec</td><td>mars</td><td>MARS</td><td><a href="papers/c0387e788a52f10bf35d4d50659cfa515d89fbec.html">MARS: A Video Benchmark for Large-Scale Person Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/c038/7e788a52f10bf35d4d50659cfa515d89fbec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>146</td><td>49</td><td>97</td><td>6</td><td>96</td><td>0</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph</td><td>MORPH Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal 
adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>9055b155cbabdce3b98e16e5ac9c0edf00f9552f</td><td>morph_nc</td><td>MORPH Non-Commercial</td><td><a href="papers/9055b155cbabdce3b98e16e5ac9c0edf00f9552f.html">MORPH: a longitudinal image database of normal adult age-progression</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/FGR.2006.78">[pdf]</a></td><td>7th International Conference on Automatic Face and Gesture Recognition (FGR06)</td><td></td><td></td><td></td><td></td><td>46%</td><td>424</td><td>195</td><td>229</td><td>27</td><td>231</td><td>155</td></tr><tr><td>291265db88023e92bb8c8e6390438e5da148e8f5</td><td>msceleb</td><td>MsCeleb</td><td><a href="papers/291265db88023e92bb8c8e6390438e5da148e8f5.html">MS-Celeb-1M: A Dataset and Benchmark for Large-Scale Face Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/4603/cb8e05258bb0572ae912ad20903b8f99f4b1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>50%</td><td>167</td><td>83</td><td>84</td><td>15</td><td>131</td><td>27</td></tr><tr><td>3dc3f0b64ef80f573e3a5f96e456e52ee980b877</td><td>georgia_tech_face_database</td><td>Georgia Tech Face</td><td><a href="papers/3dc3f0b64ef80f573e3a5f96e456e52ee980b877.html">Maximum Likelihood Training of the Embedded HMM for Face Detection and Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/3dc3/f0b64ef80f573e3a5f96e456e52ee980b877.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>2</td><td>0</td></tr><tr><td>5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725</td><td>50_people_one_question</td><td>50 People One Question</td><td><a href="papers/5753b2b5e442eaa3be066daa4a2ca8d8a0bb1725.html">Merging Pose Estimates Across Space and Time</a></td><td><a href="http://pdfs.semanticscholar.org/63b2/f5348af0f969dfc2afb4977732393c6459ec.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>60%</td><td>15</td><td>9</td><td>6</td><td>0</td><td>11</td><td>2</td></tr><tr><td>696ca58d93f6404fea0fc75c62d1d7b378f47628</td><td>coco</td><td>COCO</td><td><a href="papers/696ca58d93f6404fea0fc75c62d1d7b378f47628.html">Microsoft COCO Captions: Data Collection and Evaluation Server</a></td><td><a href="http://pdfs.semanticscholar.org/ba95/81c33a7eebe87c50e61763e4c8d1723538f2.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>46%</td><td>283</td><td>129</td><td>154</td><td>16</td><td>231</td><td>4</td></tr><tr><td>a5a44a32a91474f00a3cda671a802e87c899fbb4</td><td>moments_in_time</td><td>Moments in Time</td><td><a href="papers/a5a44a32a91474f00a3cda671a802e87c899fbb4.html">Moments in Time Dataset: one million videos for event understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1801.03150.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>25</td><td>0</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_multiview</td><td>TUD-Multiview</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a 
href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>436f798d1a4e54e5947c1e7d7375c31b2bdb4064</td><td>tud_stadtmitte</td><td>TUD-Stadtmitte</td><td><a href="papers/436f798d1a4e54e5947c1e7d7375c31b2bdb4064.html">Monocular 3D pose estimation and tracking by detection</a></td><td><a href="http://lmb.informatik.uni-freiburg.de/lectures/seminar_brox/seminar_ws1011/cvpr10_andriluka.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>25%</td><td>302</td><td>76</td><td>226</td><td>32</td><td>199</td><td>1</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_brussels</td><td>TUD-Brussels</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>6ad5a38df8dd4cdddd74f31996ce096d41219f72</td><td>tud_motionpairs</td><td>TUD-Motionparis</td><td><a href="papers/6ad5a38df8dd4cdddd74f31996ce096d41219f72.html">Multi-cue onboard pedestrian detection</a></td><td><a href="https://www.mpi-inf.mpg.de/fileadmin/inf/d2/wojek/poster_cwojek_cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>19%</td><td>217</td><td>41</td><td>176</td><td>14</td><td>131</td><td>1</td></tr><tr><td>32c801cb7fbeb742edfd94cccfca4934baec71da</td><td>ucf_crowd</td><td>UCF-CC-50</td><td><a href="papers/32c801cb7fbeb742edfd94cccfca4934baec71da.html">Multi-source Multi-scale Counting in Extremely Dense Crowd Images</a></td><td><a href="http://www.cs.ucf.edu/~haroon/datafiles/Idrees_Counting_CVPR_2013.pdf">[pdf]</a></td><td>2013 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>38%</td><td>125</td><td>48</td><td>77</td><td>6</td><td>72</td><td>1</td></tr><tr><td>1e3df3ca8feab0b36fd293fe689f93bb2aaac591</td><td>immediacy</td><td>Immediacy</td><td><a href="papers/1e3df3ca8feab0b36fd293fe689f93bb2aaac591.html">Multi-task Recurrent Neural Network for Immediacy Prediction</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/ICCV.2015.383">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>60%</td><td>25</td><td>15</td><td>10</td><td>2</td><td>20</td><td>0</td></tr><tr><td>53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4</td><td>bp4d_plus</td><td>BP4D+</td><td><a href="papers/53ae38a6bb2b21b42bac4f0c4c8ed1f9fa02f9d4.html">Multimodal Spontaneous Emotion Corpus for Human Behavior Analysis</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Multimodal_Spontaneous_Emotion_CVPR_2016_paper.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition 
(CVPR)</td><td></td><td></td><td></td><td></td><td>25%</td><td>40</td><td>10</td><td>30</td><td>0</td><td>20</td><td>6</td></tr><tr><td>2fda164863a06a92d3a910b96eef927269aeb730</td><td>names_and_faces_news</td><td>News Dataset</td><td><a href="papers/2fda164863a06a92d3a910b96eef927269aeb730.html">Names and faces in the news</a></td><td><a href="http://www.cs.utexas.edu/~grauman/courses/spring2007/395T/papers/berg_names_and_faces.pdf">[pdf]</a></td><td>Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.</td><td></td><td></td><td></td><td></td><td>41%</td><td>294</td><td>120</td><td>174</td><td>24</td><td>207</td><td>45</td></tr><tr><td>4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06</td><td>distance_nighttime</td><td>Long Distance Heterogeneous Face</td><td><a href="papers/4156b7e88f2e0ab0a7c095b9bab199ae2b23bd06.html">Nighttime Face Recognition at Long Distance: Cross-Distance and Cross-Spectral Matching</a></td><td><a href="http://pdfs.semanticscholar.org/4156/b7e88f2e0ab0a7c095b9bab199ae2b23bd06.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>24%</td><td>21</td><td>5</td><td>16</td><td>3</td><td>11</td><td>1</td></tr><tr><td>31b58ced31f22eab10bd3ee2d9174e7c14c27c01</td><td>tiny_images</td><td>Tiny Images</td><td><a href="papers/31b58ced31f22eab10bd3ee2d9174e7c14c27c01.html">Nonparametric Object and Scene Recognition</a></td><td><a href="http://pdfs.semanticscholar.org/31b5/8ced31f22eab10bd3ee2d9174e7c14c27c01.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>31%</td><td>999</td><td>305</td><td>694</td><td>93</td><td>670</td><td>9</td></tr><tr><td>55206f0b5f57ce17358999145506cd01e570358c</td><td>orl</td><td>ORL</td><td><a href="papers/55206f0b5f57ce17358999145506cd01e570358c.html">O M 4 . 1 The Subject Database 4 . 2 Experiment Plan 5 . 
1 Varying the Overlap 4 Experimental Setup 5 Parameterisation Results</a></td><td><a href="http://pdfs.semanticscholar.org/5520/6f0b5f57ce17358999145506cd01e570358c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>999</td><td>214</td><td>785</td><td>96</td><td>550</td><td>57</td></tr><tr><td>3394168ff0719b03ff65bcea35336a76b21fe5e4</td><td>penn_fudan</td><td>Penn Fudan</td><td><a href="papers/3394168ff0719b03ff65bcea35336a76b21fe5e4.html">Object Detection Combining Recognition and Segmentation</a></td><td><a href="http://pdfs.semanticscholar.org/f531/a554cade14b9b340de6730683a28c292dd74.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>23%</td><td>101</td><td>23</td><td>78</td><td>11</td><td>58</td><td>0</td></tr><tr><td>4f93cd09785c6e77bf4bc5a788e079df524c8d21</td><td>soton</td><td>SOTON HiD</td><td><a href="papers/4f93cd09785c6e77bf4bc5a788e079df524c8d21.html">On a large sequence-based human gait database</a></td><td><a href="http://pdfs.semanticscholar.org/4f93/cd09785c6e77bf4bc5a788e079df524c8d21.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>36%</td><td>148</td><td>54</td><td>94</td><td>16</td><td>98</td><td>0</td></tr><tr><td>6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c</td><td>afad</td><td>AFAD</td><td><a href="papers/6618cff7f2ed440a0d2fa9e74ad5469df5cdbe4c.html">Ordinal Regression with Multiple Output CNN for Age Estimation</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2016.532">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>68</td><td>30</td><td>38</td><td>8</td><td>49</td><td>7</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>market1203</td><td>Market 1203</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>a7fe834a0af614ce6b50dc093132b031dd9a856b</td><td>pku_reid</td><td>PKU-Reid</td><td><a href="papers/a7fe834a0af614ce6b50dc093132b031dd9a856b.html">Orientation Driven Bag of Appearances for Person Re-identification</a></td><td><a href="http://pdfs.semanticscholar.org/a7fe/834a0af614ce6b50dc093132b031dd9a856b.pdf">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>3</td><td>0</td></tr><tr><td>18ae7c9a4bbc832b8b14bc4122070d7939f5e00e</td><td>frgc</td><td>FRGC</td><td><a href="papers/18ae7c9a4bbc832b8b14bc4122070d7939f5e00e.html">Overview of the face recognition grand challenge</a></td><td><a href="http://www3.nd.edu/~kwb/PhillipsEtAlCVPR_2005.pdf">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>25%</td><td>999</td><td>253</td><td>746</td><td>110</td><td>572</td><td>64</td></tr><tr><td>22909dd19a0ec3b6065334cb5be5392cb24d839d</td><td>pets</td><td>PETS 2017</td><td><a href="papers/22909dd19a0ec3b6065334cb5be5392cb24d839d.html">PETS 2017: Dataset and Challenge</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8014998', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops 
(CVPRW)</td><td></td><td></td><td></td><td></td><td>0%</td><td>8</td><td>0</td><td>8</td><td>0</td><td>2</td><td>0</td></tr><tr><td>56ffa7d906b08d02d6d5a12c7377a57e24ef3391</td><td>unbc_shoulder_pain</td><td>UNBC-McMaster Pain</td><td><a href="papers/56ffa7d906b08d02d6d5a12c7377a57e24ef3391.html">Painful data: The UNBC-McMaster shoulder pain expression archive database</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5771462', 'linkType': 'ieee'}">[pdf]</a></td><td>Face and Gesture 2011</td><td></td><td></td><td></td><td></td><td>32%</td><td>184</td><td>58</td><td>126</td><td>23</td><td>112</td><td>23</td></tr><tr><td>0486214fb58ee9a04edfe7d6a74c6d0f661a7668</td><td>chokepoint</td><td>ChokePoint</td><td><a href="papers/0486214fb58ee9a04edfe7d6a74c6d0f661a7668.html">Patch-based probabilistic image quality assessment for face selection and improved video-based face recognition</a></td><td><a href="http://conradsanderson.id.au/pdfs/wong_face_selection_cvpr_biometrics_2011.pdf">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>University of Queensland</td><td>-27.49741805</td><td>153.01316956</td><td>30%</td><td>128</td><td>39</td><td>89</td><td>6</td><td>68</td><td>14</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>apis</td><td>APiS1.0</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>488e475eeb3bb39a145f23ede197cd3620f1d98a</td><td>svs</td><td>SVS</td><td><a href="papers/488e475eeb3bb39a145f23ede197cd3620f1d98a.html">Pedestrian Attribute Classification in Surveillance: Database and Evaluation</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_iccv_workshops_2013/W10/papers/Zhu_Pedestrian_Attribute_Classification_2013_ICCV_paper.pdf">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision Workshops</td><td></td><td></td><td></td><td></td><td>38%</td><td>26</td><td>10</td><td>16</td><td>1</td><td>13</td><td>2</td></tr><tr><td>2a4bbee0b4cf52d5aadbbc662164f7efba89566c</td><td>peta</td><td>PETA</td><td><a href="papers/2a4bbee0b4cf52d5aadbbc662164f7efba89566c.html">Pedestrian Attribute Recognition At Far Distance</a></td><td><a href="http://personal.ie.cuhk.edu.hk/~pluo/pdf/mm14.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>80</td><td>37</td><td>43</td><td>2</td><td>51</td><td>3</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>10%</td><td>999</td><td>96</td><td>355</td><td>26</td><td>252</td><td>5</td></tr><tr><td>f72f6a45ee240cc99296a287ff725aaa7e7ebb35</td><td>caltech_pedestrians</td><td>Caltech Pedestrians</td><td><a 
href="papers/f72f6a45ee240cc99296a287ff725aaa7e7ebb35.html">Pedestrian Detection: An Evaluation of the State of the Art</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5975165', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>10%</td><td>999</td><td>96</td><td>355</td><td>26</td><td>252</td><td>5</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_campus</td><td>TUD-Campus</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_crossing</td><td>TUD-Crossing</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>3316521a5527c7700af8ae6aef32a79a8b83672c</td><td>tud_pedestrian</td><td>TUD-Pedestrian</td><td><a href="papers/3316521a5527c7700af8ae6aef32a79a8b83672c.html">People-tracking-by-detection and people-detection-by-tracking</a></td><td><a href="http://mplab.ucsd.edu/wp-content/uploads/CVPR2008/Conference/data/papers/243.pdf">[pdf]</a></td><td>2008 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>22%</td><td>529</td><td>116</td><td>413</td><td>41</td><td>316</td><td>1</td></tr><tr><td>27a2fad58dd8727e280f97036e0d2bc55ef5424c</td><td>duke_mtmc</td><td>Duke MTMC</td><td><a href="papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html">Performance Measures and a Data Set for Multi-target, Multi-camera Tracking</a></td><td><a href="http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>136</td><td>58</td><td>78</td><td>6</td><td>107</td><td>0</td></tr><tr><td>16c7c31a7553d99f1837fc6e88e77b5ccbb346b8</td><td>prid</td><td>PRID</td><td><a href="papers/16c7c31a7553d99f1837fc6e88e77b5ccbb346b8.html">Person Re-identification by Descriptive and Discriminative Classification</a></td><td><a href="http://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>27%</td><td>352</td><td>94</td><td>258</td><td>26</td><td>195</td><td>3</td></tr><tr><td>99eb4cea0d9bc9fe777a5c5172f8638a37a7f262</td><td>ilids_vid_reid</td><td>iLIDS-VID</td><td><a href="papers/99eb4cea0d9bc9fe777a5c5172f8638a37a7f262.html">Person Re-identification by Exploiting Spatio-Temporal Cues and Multi-view Metric Learning</a></td><td><a href="https://doi.org/10.1109/LSP.2016.2574323">[pdf]</a></td><td>IEEE Signal Processing 
Letters</td><td></td><td></td><td></td><td></td><td>29%</td><td>7</td><td>2</td><td>5</td><td>0</td><td>4</td><td>0</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html">Person Re-identification by Video Ranking</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>25%</td><td>196</td><td>49</td><td>124</td><td>11</td><td>98</td><td>1</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html">Person Re-identification by Video Ranking</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>25%</td><td>196</td><td>49</td><td>124</td><td>11</td><td>98</td><td>1</td></tr><tr><td>98bb029afe2a1239c3fdab517323066f0957b81b</td><td>sdu_vid</td><td>SDU-VID</td><td><a href="papers/98bb029afe2a1239c3fdab517323066f0957b81b.html">Person Re-identification by Video Ranking</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/98bb/029afe2a1239c3fdab517323066f0957b81b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>25%</td><td>196</td><td>49</td><td>124</td><td>11</td><td>98</td><td>1</td></tr><tr><td>0b84f07af44f964817675ad961def8a51406dd2e</td><td>prw</td><td>PRW</td><td><a href="papers/0b84f07af44f964817675ad961def8a51406dd2e.html">Person Re-identification in the Wild</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2017.357">[pdf]</a></td><td>2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Technology Sydney</td><td>-33.88096510</td><td>151.20107299</td><td>38%</td><td>65</td><td>25</td><td>40</td><td>1</td><td>46</td><td>0</td></tr><tr><td>ec792ad2433b6579f2566c932ee414111e194537</td><td>msmt_17</td><td>MSMT17</td><td><a href="papers/ec792ad2433b6579f2566c932ee414111e194537.html">Person Transfer GAN to Bridge Domain Gap for Person Re-Identification</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1711.08565.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>50%</td><td>14</td><td>7</td><td>7</td><td>1</td><td>11</td><td>0</td></tr><tr><td>1c2802c2199b6d15ecefe7ba0c39bfe44363de38</td><td>youtube_poses</td><td>YouTube Pose</td><td><a href="papers/1c2802c2199b6d15ecefe7ba0c39bfe44363de38.html">Personalizing Human Video Pose Estimation</a></td><td><a href="http://arxiv.org/pdf/1511.06676v1.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>44%</td><td>32</td><td>14</td><td>18</td><td>2</td><td>27</td><td>0</td></tr><tr><td>b92a1ed9622b8268ae3ac9090e25789fc41cc9b8</td><td>pornodb</td><td>Pornography DB</td><td><a href="papers/b92a1ed9622b8268ae3ac9090e25789fc41cc9b8.html">Pooling in image representation: The visual codeword point of view</a></td><td><a href="http://pdfs.semanticscholar.org/b92a/1ed9622b8268ae3ac9090e25789fc41cc9b8.pdf">[pdf]</a></td><td>Computer Vision and Image 
Understanding</td><td></td><td></td><td></td><td></td><td>9%</td><td>77</td><td>7</td><td>70</td><td>7</td><td>43</td><td>2</td></tr><tr><td>2830fb5282de23d7784b4b4bc37065d27839a412</td><td>h3d</td><td>H3D</td><td><a href="papers/2830fb5282de23d7784b4b4bc37065d27839a412.html">Poselets: Body part detectors trained using 3D human pose annotations</a></td><td><a href="http://vision.stanford.edu/teaching/cs231b_spring1213/papers/ICCV09_BourdevMalik.pdf">[pdf]</a></td><td>2009 IEEE 12th International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>707</td><td>223</td><td>484</td><td>62</td><td>482</td><td>18</td></tr><tr><td>3765df816dc5a061bc261e190acc8bdd9d47bec0</td><td>rafd</td><td>RaFD</td><td><a href="papers/3765df816dc5a061bc261e190acc8bdd9d47bec0.html">Presentation and validation of the Radboud Faces Database</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/3765/df816dc5a061bc261e190acc8bdd9d47bec0.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>28%</td><td>446</td><td>127</td><td>319</td><td>43</td><td>307</td><td>19</td></tr><tr><td>636b8ffc09b1b23ff714ac8350bb35635e49fa3c</td><td>caltech_10k_web_faces</td><td>Caltech 10K Web Faces</td><td><a href="papers/636b8ffc09b1b23ff714ac8350bb35635e49fa3c.html">Pruning training sets for learning of object categories</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1467308', 'linkType': 'ieee'}">[pdf]</a></td><td>2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)</td><td></td><td></td><td></td><td></td><td>58%</td><td>60</td><td>35</td><td>25</td><td>5</td><td>42</td><td>12</td></tr><tr><td>377f2b65e6a9300448bdccf678cde59449ecd337</td><td>ufdd</td><td>UFDD</td><td><a href="papers/377f2b65e6a9300448bdccf678cde59449ecd337.html">Pushing the Limits of Unconstrained Face Detection: a Challenge Dataset and Baseline Results</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.10275.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Johns Hopkins University</td><td>39.32905300</td><td>-76.61942500</td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>140c95e53c619eac594d70f6369f518adfea12ef</td><td>ijb_a</td><td>IJB-A</td><td><a href="papers/140c95e53c619eac594d70f6369f518adfea12ef.html">Pushing the frontiers of unconstrained face detection and recognition: IARPA Janus Benchmark A</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/app/1B_089_ext.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>48%</td><td>222</td><td>107</td><td>115</td><td>21</td><td>158</td><td>48</td></tr><tr><td>d80a3d1f3a438e02a6685e66ee908446766fefa9</td><td>megaage</td><td>MegaAge</td><td><a href="papers/d80a3d1f3a438e02a6685e66ee908446766fefa9.html">Quantifying Facial Age by Posterior of Age Comparisons</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1708.09687.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>Chinese University of Hong Kong</td><td>22.42031295</td><td>114.20788644</td><td>25%</td><td>4</td><td>1</td><td>3</td><td>1</td><td>4</td><td>0</td></tr><tr><td>4946ba10a4d5a7d0a38372f23e6622bd347ae273</td><td>coco_action</td><td>COCO-a</td><td><a href="papers/4946ba10a4d5a7d0a38372f23e6622bd347ae273.html">RONCHI AND PERONA: DESCRIBING COMMON HUMAN VISUAL ACTIONS IN IMAGES 1 Describing Common Human 
Visual Actions in Images</a></td><td><a href="http://pdfs.semanticscholar.org/b38d/cf5fa5174c0d718d65cc4f3889b03c4a21df.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>54%</td><td>26</td><td>14</td><td>12</td><td>0</td><td>25</td><td>0</td></tr><tr><td>922e0a51a3b8c67c4c6ac09a577ff674cbd28b34</td><td>v47</td><td>V47</td><td><a href="papers/922e0a51a3b8c67c4c6ac09a577ff674cbd28b34.html">Re-identification of pedestrians with variable occlusion and scale</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130477">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>0%</td><td>10</td><td>0</td><td>10</td><td>2</td><td>6</td><td>0</td></tr><tr><td>6f3c76b7c0bd8e1d122c6ea808a271fd4749c951</td><td>ward</td><td>WARD</td><td><a href="papers/6f3c76b7c0bd8e1d122c6ea808a271fd4749c951.html">Re-identify people in wide area camera network</a></td><td><a href="https://doi.org/10.1109/CVPRW.2012.6239203">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>9%</td><td>55</td><td>5</td><td>50</td><td>2</td><td>35</td><td>0</td></tr><tr><td>54983972aafc8e149259d913524581357b0f91c3</td><td>reseed</td><td>ReSEED</td><td><a href="papers/54983972aafc8e149259d913524581357b0f91c3.html">ReSEED: social event dEtection dataset</a></td><td><a href="https://pub.uni-bielefeld.de/download/2663466/2686734">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>17%</td><td>6</td><td>1</td><td>5</td><td>1</td><td>1</td><td>1</td></tr><tr><td>65355cbb581a219bd7461d48b3afd115263ea760</td><td>complex_activities</td><td>Ongoing Complex Activities</td><td><a href="papers/65355cbb581a219bd7461d48b3afd115263ea760.html">Recognition of ongoing complex activities by sequence prediction over a hierarchical label space</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/WACV.2016.7477586">[pdf]</a></td><td>2016 IEEE Winter Conference on Applications of Computer Vision (WACV)</td><td></td><td></td><td></td><td></td><td>0%</td><td>2</td><td>0</td><td>2</td><td>0</td><td>2</td><td>0</td></tr><tr><td>e8de844fefd54541b71c9823416daa238be65546</td><td>visual_phrases</td><td>Phrasal Recognition</td><td><a href="papers/e8de844fefd54541b71c9823416daa238be65546.html">Recognition using visual phrases</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5995711', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011</td><td></td><td></td><td></td><td></td><td>41%</td><td>233</td><td>95</td><td>138</td><td>18</td><td>174</td><td>5</td></tr><tr><td>356b431d4f7a2a0a38cf971c84568207dcdbf189</td><td>wider</td><td>WIDER</td><td><a href="papers/356b431d4f7a2a0a38cf971c84568207dcdbf189.html">Recognize complex events from static images by fusing deep channels</a></td><td><a href="http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Xiong_Recognize_Complex_Events_2015_CVPR_paper.pdf">[pdf]</a></td><td>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Shenzhen Institutes of Advanced Technology</td><td>22.59805605</td><td>113.98533784</td><td>58%</td><td>45</td><td>26</td><td>19</td><td>1</td><td>30</td><td>12</td></tr><tr><td>25474c21613607f6bb7687a281d5f9d4ffa1f9f3</td><td>faceplace</td><td>Face Place</td><td><a href="papers/25474c21613607f6bb7687a281d5f9d4ffa1f9f3.html">Recognizing disguised faces</a></td><td><a 
href="http://pdfs.semanticscholar.org/d936/7ceb0be378c3a9ddf7cb741c678c1a3c574c.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>25%</td><td>24</td><td>6</td><td>18</td><td>0</td><td>16</td><td>1</td></tr><tr><td>4053e3423fb70ad9140ca89351df49675197196a</td><td>bio_id</td><td>BioID Face</td><td><a href="papers/4053e3423fb70ad9140ca89351df49675197196a.html">Robust Face Detection Using the Hausdorff Distance</a></td><td><a href="http://pdfs.semanticscholar.org/4053/e3423fb70ad9140ca89351df49675197196a.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>26%</td><td>498</td><td>127</td><td>371</td><td>55</td><td>319</td><td>32</td></tr><tr><td>2724ba85ec4a66de18da33925e537f3902f21249</td><td>cofw</td><td>COFW</td><td><a href="papers/2724ba85ec4a66de18da33925e537f3902f21249.html">Robust Face Landmark Estimation under Occlusion</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6751298', 'linkType': 'ieee'}">[pdf]</a></td><td>2013 IEEE International Conference on Computer Vision</td><td></td><td></td><td></td><td></td><td>55%</td><td>305</td><td>167</td><td>138</td><td>16</td><td>186</td><td>95</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><a href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>45.50397610</td><td>-73.57496870</td><td>28%</td><td>18</td><td>5</td><td>6</td><td>0</td><td>7</td><td>4</td></tr><tr><td>c570d1247e337f91e555c3be0e8c8a5aba539d9f</td><td>mcgill</td><td>McGill Real World</td><td><a href="papers/c570d1247e337f91e555c3be0e8c8a5aba539d9f.html">Robust semi-automatic head pose labeling for real-world face video sequences</a></td><td><a href="https://doi.org/10.1007/s11042-012-1352-1">[pdf]</a></td><td>Multimedia Tools and Applications</td><td>edu</td><td>McGill University</td><td>45.50397610</td><td>-73.57496870</td><td>28%</td><td>18</td><td>5</td><td>6</td><td>0</td><td>7</td><td>4</td></tr><tr><td>e27ef52c641c2b5100a1b34fd0b819e84a31b4df</td><td>sarc3d</td><td>Sarc3D</td><td><a href="papers/e27ef52c641c2b5100a1b34fd0b819e84a31b4df.html">SARC3D: A New 3D Body Model for People Tracking and Re-identification</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/e27e/f52c641c2b5100a1b34fd0b819e84a31b4df.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>14%</td><td>29</td><td>4</td><td>25</td><td>3</td><td>17</td><td>0</td></tr><tr><td>bd26dabab576adb6af30484183c9c9c8379bf2e0</td><td>scut_fbp</td><td>SCUT-FBP</td><td><a href="papers/bd26dabab576adb6af30484183c9c9c8379bf2e0.html">SCUT-FBP: A Benchmark Dataset for Facial Beauty Perception</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.02459.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2015 IEEE International Conference on Systems, Man, and Cybernetics</td><td>edu</td><td>South China University of Technology</td><td>23.05020420</td><td>113.39880323</td><td>43%</td><td>14</td><td>6</td><td>8</td><td>3</td><td>5</td><td>7</td></tr><tr><td>d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9</td><td>stair_actions</td><td>STAIR Action</td><td><a href="papers/d3f5a1848b0028d8ab51d0b0673732cad2e3c8c9.html">STAIR Actions: A Video Dataset of Everyday Home Actions</a></td><td><a href="{'url': 
'https://arxiv.org/pdf/1804.04326.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>32%</td><td>260</td><td>84</td><td>85</td><td>18</td><td>143</td><td>10</td></tr><tr><td>833fa04463d90aab4a9fe2870d480f0b40df446e</td><td>sun_attributes</td><td>SUN</td><td><a href="papers/833fa04463d90aab4a9fe2870d480f0b40df446e.html">SUN attribute database: Discovering, annotating, and recognizing scene attributes</a></td><td><a href="http://doi.ieeecomputersociety.org/10.1109/CVPR.2012.6247998">[pdf]</a></td><td>2012 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>32%</td><td>260</td><td>84</td><td>85</td><td>18</td><td>143</td><td>10</td></tr><tr><td>4308bd8c28e37e2ed9a3fcfe74d5436cce34b410</td><td>market_1501</td><td>Market 1501</td><td><a href="papers/4308bd8c28e37e2ed9a3fcfe74d5436cce34b410.html">Scalable Person Re-identification: A Benchmark</a></td><td><a href="https://www.microsoft.com/en-us/research/wp-content/uploads/2017/01/ICCV15-ReIDDataset.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>38%</td><td>394</td><td>149</td><td>245</td><td>18</td><td>271</td><td>3</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>facebook_100</td><td>Facebook100</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>9c23859ec7313f2e756a3e85575735e0c52249f4</td><td>pubfig_83</td><td>pubfig83</td><td><a href="papers/9c23859ec7313f2e756a3e85575735e0c52249f4.html">Scaling up biologically-inspired computer vision: A case study in unconstrained face recognition on facebook</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5981788', 'linkType': 'ieee'}">[pdf]</a></td><td>CVPR 2011 WORKSHOPS</td><td>edu</td><td>Harvard University</td><td>42.36782045</td><td>-71.12666653</td><td>50%</td><td>50</td><td>25</td><td>25</td><td>3</td><td>39</td><td>4</td></tr><tr><td>109df0e8e5969ddf01e073143e83599228a1163f</td><td>multi_pie</td><td>MULTIPIE</td><td><a href="papers/109df0e8e5969ddf01e073143e83599228a1163f.html">Scheduling heterogeneous multi-cores through performance impact estimation (PIE)</a></td><td><a href="http://dl.acm.org/citation.cfm?id=2337184">[pdf]</a></td><td>2012 39th Annual International Symposium on Computer Architecture 
(ISCA)</td><td></td><td></td><td></td><td></td><td>25%</td><td>192</td><td>48</td><td>144</td><td>8</td><td>99</td><td>0</td></tr><tr><td>51eba481dac6b229a7490f650dff7b17ce05df73</td><td>imsitu</td><td>imSitu</td><td><a href="papers/51eba481dac6b229a7490f650dff7b17ce05df73.html">Situation Recognition: Visual Semantic Role Labeling for Image Understanding</a></td><td><a href="http://grail.cs.washington.edu/wp-content/uploads/2016/09/yatskar2016srv.pdf">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>University of Washington</td><td>47.65432380</td><td>-122.30800894</td><td>60%</td><td>48</td><td>29</td><td>19</td><td>2</td><td>45</td><td>2</td></tr><tr><td>f152b6ee251cca940dd853c54e6a7b78fbc6b235</td><td>affectnet</td><td>AffectNet</td><td><a href="papers/f152b6ee251cca940dd853c54e6a7b78fbc6b235.html">Skybiometry and AffectNet on Facial Emotion Recognition Using Supervised Machine Learning Algorithms</a></td><td><a href="{'url': 'http://dl.acm.org/citation.cfm?id=3232665', 'linkType': 'acm'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>23e824d1dfc33f3780dd18076284f07bd99f1c43</td><td>mifs</td><td>MIFS</td><td><a href="papers/23e824d1dfc33f3780dd18076284f07bd99f1c43.html">Spoofing faces using makeup: An investigative study</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7947686', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Identity, Security and Behavior Analysis (ISBA)</td><td>edu</td><td>INRIA Méditerranée</td><td>43.61581310</td><td>7.06838000</td><td>20%</td><td>5</td><td>1</td><td>4</td><td>0</td><td>1</td><td>2</td></tr><tr><td>1a40092b493c6b8840257ab7f96051d1a4dbfeb2</td><td>sports_videos_in_the_wild</td><td>SVW</td><td><a href="papers/1a40092b493c6b8840257ab7f96051d1a4dbfeb2.html">Sports Videos in the Wild (SVW): A video dataset for sports analysis</a></td><td><a href="http://web.cse.msu.edu/~liuxm/publication/Safdarnejad_Liu_Udpa_Andrus_Wood_Craven_FG2015.pdf">[pdf]</a></td><td>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)</td><td>edu</td><td>Michigan State University</td><td>42.71856800</td><td>-84.47791571</td><td>67%</td><td>6</td><td>4</td><td>2</td><td>1</td><td>5</td><td>0</td></tr><tr><td>9361b784e73e9238d5cefbea5ac40d35d1e3103f</td><td>towncenter</td><td>TownCenter</td><td><a href="papers/9361b784e73e9238d5cefbea5ac40d35d1e3103f.html">Stable Multi-Target Tracking in Real-Time Surveillance Video (Preprint)</a></td><td><a href="http://pdfs.semanticscholar.org/9361/b784e73e9238d5cefbea5ac40d35d1e3103f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>21%</td><td>310</td><td>64</td><td>246</td><td>24</td><td>177</td><td>4</td></tr><tr><td>c866a2afc871910e3282fd9498dce4ab20f6a332</td><td>qmul_surv_face</td><td>QMUL-SurvFace</td><td><a href="papers/c866a2afc871910e3282fd9498dce4ab20f6a332.html">Surveillance Face Recognition Challenge</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1804.09691.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f</td><td>pku</td><td>PKU</td><td><a href="papers/f6c8d5e35d7e4d60a0104f233ac1a3ab757da53f.html">Swiss-System Based Cascade Ranking for Gait-Based Person 
Re-Identification</a></td><td><a href="http://pdfs.semanticscholar.org/f6c8/d5e35d7e4d60a0104f233ac1a3ab757da53f.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>3</td><td>0</td><td>3</td><td>0</td><td>1</td><td>0</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>cas_peal</td><td>CAS-PEAL</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>2485c98aa44131d1a2f7d1355b1e372f2bb148ad</td><td>m2vts</td><td>m2vts</td><td><a href="papers/2485c98aa44131d1a2f7d1355b1e372f2bb148ad.html">The CAS-PEAL Large-Scale Chinese Face Database and Baseline Evaluations</a></td><td><a href="https://doi.org/10.1109/TSMCA.2007.909557">[pdf]</a></td><td>IEEE Transactions on Systems, Man, and Cybernetics - Part A: Systems and Humans</td><td></td><td></td><td></td><td></td><td>18%</td><td>415</td><td>76</td><td>339</td><td>39</td><td>182</td><td>35</td></tr><tr><td>47662d1a368daf70ba70ef2d59eb6209f98b675d</td><td>fia</td><td>CMU FiA</td><td><a href="papers/47662d1a368daf70ba70ef2d59eb6209f98b675d.html">The CMU Face In Action (FIA) Database</a></td><td><a href="http://pdfs.semanticscholar.org/bb47/a03401811f9d2ca2da12138697acbc7b97a3.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>29%</td><td>55</td><td>16</td><td>39</td><td>5</td><td>38</td><td>7</td></tr><tr><td>4df3143922bcdf7db78eb91e6b5359d6ada004d2</td><td>cfd</td><td>CFD</td><td><a href="papers/4df3143922bcdf7db78eb91e6b5359d6ada004d2.html">The Chicago face database: A free stimulus set of faces and norming data.</a></td><td><a href="http://pdfs.semanticscholar.org/4df3/143922bcdf7db78eb91e6b5359d6ada004d2.pdf">[pdf]</a></td><td>Behavior research methods</td><td></td><td></td><td></td><td></td><td>39%</td><td>83</td><td>32</td><td>51</td><td>2</td><td>62</td><td>3</td></tr><tr><td>20388099cc415c772926e47bcbbe554e133343d1</td><td>cafe</td><td>CAFE</td><td><a href="papers/20388099cc415c772926e47bcbbe554e133343d1.html">The Child Affective Facial Expression (CAFE) set: validity and reliability from untrained adults</a></td><td><a href="http://pdfs.semanticscholar.org/2038/8099cc415c772926e47bcbbe554e133343d1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>48%</td><td>33</td><td>16</td><td>17</td><td>3</td><td>28</td><td>1</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>32cde90437ab5a70cf003ea36f66f2de0e24b3ab</td><td>cityscapes</td><td>Cityscapes</td><td><a href="papers/32cde90437ab5a70cf003ea36f66f2de0e24b3ab.html">The Cityscapes Dataset for Semantic Urban Scene Understanding</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1604.01685.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer 
Vision and Pattern Recognition (CVPR)</td><td></td><td></td><td></td><td></td><td>33%</td><td>771</td><td>252</td><td>519</td><td>54</td><td>622</td><td>0</td></tr><tr><td>4e6ee936eb50dd032f7138702fa39b7c18ee8907</td><td>dartmouth_children</td><td>Dartmouth Children</td><td><a href="papers/4e6ee936eb50dd032f7138702fa39b7c18ee8907.html">The Dartmouth Database of Children’s Faces: Acquisition and Validation of a New Face Stimulus Set</a></td><td><a href="http://pdfs.semanticscholar.org/4e6e/e936eb50dd032f7138702fa39b7c18ee8907.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>40%</td><td>20</td><td>8</td><td>12</td><td>2</td><td>16</td><td>0</td></tr><tr><td>f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4</td><td>europersons</td><td>EuroCity Persons</td><td><a href="papers/f0e17f27f029db4ad650ff278fe3c10ecb6cb0c4.html">The EuroCity Persons Dataset: A Novel Benchmark for Object Detection</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1805.07193.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>4d9a02d080636e9666c4d1cc438b9893391ec6c7</td><td>cohn_kanade_plus</td><td>CK+</td><td><a href="papers/4d9a02d080636e9666c4d1cc438b9893391ec6c7.html">The Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression</a></td><td><a href="http://www.iainm.com/iainm/Publications_files/2010_The%20Extended.pdf">[pdf]</a></td><td>2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops</td><td></td><td></td><td></td><td></td><td>41%</td><td>975</td><td>403</td><td>572</td><td>65</td><td>460</td><td>345</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. 
Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>0f0fcf041559703998abf310e56f8a2f90ee6f21</td><td>feret</td><td>FERET</td><td><a href="papers/0f0fcf041559703998abf310e56f8a2f90ee6f21.html">The FERET Evaluation Methodology for Face-Recognition Algorithms</a></td><td><a href="http://pdfs.semanticscholar.org/0f0f/cf041559703998abf310e56f8a2f90ee6f21.pdf">[pdf]</a></td><td>IEEE Trans. Pattern Anal. Mach. Intell.</td><td></td><td></td><td></td><td></td><td>11%</td><td>999</td><td>109</td><td>259</td><td>32</td><td>213</td><td>51</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_large</td><td>Large MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>ea050801199f98a1c7c1df6769f23f658299a3ae</td><td>mpi_small</td><td>Small MPI Facial Expression</td><td><a href="papers/ea050801199f98a1c7c1df6769f23f658299a3ae.html">The MPI Facial Expression Database — A Validated Database of Emotional and Conversational Facial Expressions</a></td><td><a href="http://pdfs.semanticscholar.org/ea05/0801199f98a1c7c1df6769f23f658299a3ae.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>46%</td><td>28</td><td>13</td><td>15</td><td>4</td><td>24</td><td>3</td></tr><tr><td>f1af714b92372c8e606485a3982eab2f16772ad8</td><td>mug_faces</td><td>MUG Faces</td><td><a href="papers/f1af714b92372c8e606485a3982eab2f16772ad8.html">The MUG facial expression database</a></td><td><a href="http://ieeexplore.ieee.org/document/5617662/">[pdf]</a></td><td>11th International Workshop on Image Analysis for Multimedia Interactive Services WIAMIS 10</td><td>edu</td><td>Aristotle University of Thessaloniki</td><td>40.62984145</td><td>22.95889350</td><td>28%</td><td>68</td><td>19</td><td>49</td><td>5</td><td>28</td><td>19</td></tr><tr><td>79828e6e9f137a583082b8b5a9dfce0c301989b8</td><td>mapillary</td><td>Mapillary</td><td><a href="papers/79828e6e9f137a583082b8b5a9dfce0c301989b8.html">The Mapillary Vistas Dataset for Semantic Understanding of Street Scenes</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8237796', 'linkType': 'ieee'}">[pdf]</a></td><td>2017 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>39%</td><td>44</td><td>17</td><td>27</td><td>0</td><td>36</td><td>0</td></tr><tr><td>a6e695ddd07aad719001c0fc1129328452385949</td><td>yfcc_100m</td><td>YFCC100M</td><td><a href="papers/a6e695ddd07aad719001c0fc1129328452385949.html">The New Data and New Challenges in Multimedia Research</a></td><td><span class="gray">[pdf]</a></td><td>CoRR</td><td></td><td></td><td></td><td></td><td>36%</td><td>160</td><td>57</td><td>103</td><td>11</td><td>105</td><td>4</td></tr><tr><td>abe9f3b91fd26fa1b50cd685c0d20debfb372f73</td><td>voc</td><td>VOC</td><td><a href="papers/abe9f3b91fd26fa1b50cd685c0d20debfb372f73.html">The Pascal Visual Object Classes Challenge: A Retrospective</a></td><td><a href="http://homepages.inf.ed.ac.uk/ckiw/postscript/ijcv_voc14.pdf">[pdf]</a></td><td>International Journal of Computer 
Vision</td><td></td><td></td><td></td><td></td><td>32%</td><td>999</td><td>315</td><td>684</td><td>75</td><td>698</td><td>6</td></tr><tr><td>8b2dd5c61b23ead5ae5508bb8ce808b5ea266730</td><td>10k_US_adult_faces</td><td>10K US Adult Faces</td><td><a href="papers/8b2dd5c61b23ead5ae5508bb8ce808b5ea266730.html">The intrinsic memorability of face photographs.</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/8b2d/d5c61b23ead5ae5508bb8ce808b5ea266730.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Journal of experimental psychology. General</td><td></td><td></td><td></td><td></td><td>36%</td><td>47</td><td>17</td><td>30</td><td>3</td><td>33</td><td>1</td></tr><tr><td>19d1b811df60f86cbd5e04a094b07f32fff7a32a</td><td>york_3d</td><td>UOY 3D Face Database</td><td><a href="papers/19d1b811df60f86cbd5e04a094b07f32fff7a32a.html">Three-dimensional face recognition: an eigensurface approach</a></td><td><a href="http://www-users.cs.york.ac.uk/~nep/research/3Dface/tomh/3DFaceRecognition-Eigensurface-ICIP(web)2.pdf">[pdf]</a></td><td>2004 International Conference on Image Processing, 2004. ICIP '04.</td><td></td><td></td><td></td><td></td><td>19%</td><td>36</td><td>7</td><td>29</td><td>4</td><td>25</td><td>1</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>16%</td><td>77</td><td>12</td><td>39</td><td>3</td><td>32</td><td>0</td></tr><tr><td>2edb87494278ad11641b6cf7a3f8996de12b8e14</td><td>qmul_grid</td><td>GRID</td><td><a href="papers/2edb87494278ad11641b6cf7a3f8996de12b8e14.html">Time-Delayed Correlation Analysis for Multi-Camera Activity Understanding</a></td><td><a href="https://doi.org/10.1007/s11263-010-0347-5">[pdf]</a></td><td>International Journal of Computer Vision</td><td></td><td></td><td></td><td></td><td>16%</td><td>77</td><td>12</td><td>39</td><td>3</td><td>32</td><td>0</td></tr><tr><td>298cbc3dfbbb3a20af4eed97906650a4ea1c29e0</td><td>ferplus</td><td>FER+</td><td><a href="papers/298cbc3dfbbb3a20af4eed97906650a4ea1c29e0.html">Training deep networks for facial expression recognition with crowd-sourced label distribution</a></td><td><a href="http://arxiv.org/pdf/1608.01041v1.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>34%</td><td>29</td><td>10</td><td>19</td><td>0</td><td>15</td><td>3</td></tr><tr><td>b5f2846a506fc417e7da43f6a7679146d99c5e96</td><td>ucf_101</td><td>UCF101</td><td><a href="papers/b5f2846a506fc417e7da43f6a7679146d99c5e96.html">UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1212.0402.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>CoRR</td><td>edu</td><td>University of Central Florida</td><td>28.59899755</td><td>-81.19712501</td><td>54%</td><td>999</td><td>535</td><td>464</td><td>73</td><td>708</td><td>212</td></tr><tr><td>16e8b0a1e8451d5f697b94c0c2b32a00abee1d52</td><td>umb</td><td>UMB</td><td><a href="papers/16e8b0a1e8451d5f697b94c0c2b32a00abee1d52.html">UMB-DB: A database of partially occluded 3D faces</a></td><td><a href="https://doi.org/10.1109/ICCVW.2011.6130509">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV 
Workshops)</td><td></td><td></td><td></td><td></td><td>29%</td><td>45</td><td>13</td><td>32</td><td>2</td><td>20</td><td>3</td></tr><tr><td>8627f019882b024aef92e4eb9355c499c733e5b7</td><td>used</td><td>USED Social Event Dataset</td><td><a href="papers/8627f019882b024aef92e4eb9355c499c733e5b7.html">USED: a large-scale social event detection dataset</a></td><td><a href="http://doi.acm.org/10.1145/2910017.2910624">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>43%</td><td>7</td><td>3</td><td>4</td><td>0</td><td>3</td><td>2</td></tr><tr><td>4b4106614c1d553365bad75d7866bff0de6056ed</td><td>czech_news_agency</td><td>UFI</td><td><a href="papers/4b4106614c1d553365bad75d7866bff0de6056ed.html">Unconstrained Facial Images: Database for Face Recognition Under Real-World Conditions</a></td><td><a href="http://pdfs.semanticscholar.org/4b41/06614c1d553365bad75d7866bff0de6056ed.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>10%</td><td>10</td><td>1</td><td>9</td><td>0</td><td>4</td><td>2</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>24%</td><td>89</td><td>21</td><td>46</td><td>2</td><td>20</td><td>14</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>24%</td><td>89</td><td>21</td><td>46</td><td>2</td><td>20</td><td>14</td></tr><tr><td>08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7</td><td>kin_face</td><td>UB KinFace</td><td><a href="papers/08f6745bc6c1b0fb68953ea61054bdcdde6d2fc7.html">Understanding Kin Relationships in a Photo</a></td><td><a href="http://www1.ece.neu.edu/~yunfu/papers/Kinship-TMM.pdf">[pdf]</a></td><td>IEEE Transactions on Multimedia</td><td></td><td></td><td></td><td></td><td>24%</td><td>89</td><td>21</td><td>46</td><td>2</td><td>20</td><td>14</td></tr><tr><td>21d9d0deed16f0ad62a4865e9acf0686f4f15492</td><td>images_of_groups</td><td>Images of Groups</td><td><a href="papers/21d9d0deed16f0ad62a4865e9acf0686f4f15492.html">Understanding images of groups of people</a></td><td><a href="http://amp.ece.cmu.edu/people/Andy/Andy_files/cvpr09.pdf">[pdf]</a></td><td>2009 IEEE Conference on Computer Vision and Pattern Recognition</td><td></td><td></td><td></td><td></td><td>36%</td><td>202</td><td>72</td><td>130</td><td>12</td><td>126</td><td>24</td></tr><tr><td>fd8168f1c50de85bac58a8d328df0a50248b16ae</td><td>nd_2006</td><td>ND-2006</td><td><a href="papers/fd8168f1c50de85bac58a8d328df0a50248b16ae.html">Using a Multi-Instance Enrollment Representation to Improve 3D Face Recognition</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4401928', 'linkType': 'ieee'}">[pdf]</a></td><td>2007 First IEEE International Conference on Biometrics: Theory, Applications, and Systems</td><td></td><td></td><td></td><td></td><td>25%</td><td>32</td><td>8</td><td>24</td><td>3</td><td>16</td><td>1</td></tr><tr><td>4563b46d42079242f06567b3f2e2f7a80cb3befe</td><td>vadana</td><td>VADANA</td><td><a 
href="papers/4563b46d42079242f06567b3f2e2f7a80cb3befe.html">VADANA: A dense dataset for facial image analysis</a></td><td><a href="http://vims.cis.udel.edu/publications/VADANA_BeFIT2011.pdf">[pdf]</a></td><td>2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)</td><td></td><td></td><td></td><td></td><td>19%</td><td>16</td><td>3</td><td>13</td><td>0</td><td>6</td><td>6</td></tr><tr><td>eb027969f9310e0ae941e2adee2d42cdf07d938c</td><td>vgg_faces2</td><td>VGG Face2</td><td><a href="papers/eb027969f9310e0ae941e2adee2d42cdf07d938c.html">VGGFace2: A Dataset for Recognising Faces across Pose and Age</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1710.08092.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)</td><td>edu</td><td>University of Oxford</td><td>51.75345380</td><td>-1.25400997</td><td>38%</td><td>56</td><td>21</td><td>35</td><td>6</td><td>50</td><td>3</td></tr><tr><td>01959ef569f74c286956024866c1d107099199f7</td><td>vqa</td><td>VQA</td><td><a href="papers/01959ef569f74c286956024866c1d107099199f7.html">VQA: Visual Question Answering</a></td><td><a href="http://arxiv.org/pdf/1505.00468v3.pdf">[pdf]</a></td><td>2015 IEEE International Conference on Computer Vision (ICCV)</td><td></td><td></td><td></td><td></td><td>47%</td><td>731</td><td>344</td><td>387</td><td>47</td><td>628</td><td>4</td></tr><tr><td>5194cbd51f9769ab25260446b4fa17204752e799</td><td>violent_flows</td><td>Violent Flows</td><td><a href="papers/5194cbd51f9769ab25260446b4fa17204752e799.html">Violent flows: Real-time detection of violent crowd behavior</a></td><td><a href="http://www.wisdom.weizmann.ac.il/mathusers/kliper/Papers/violent_flows.pdf">[pdf]</a></td><td>2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops</td><td></td><td></td><td></td><td></td><td>20%</td><td>83</td><td>17</td><td>66</td><td>6</td><td>42</td><td>2</td></tr><tr><td>066000d44d6691d27202896691f08b27117918b9</td><td>psu</td><td>PSU</td><td><a href="papers/066000d44d6691d27202896691f08b27117918b9.html">Vision-Based Analysis of Small Groups in Pedestrian Crowds</a></td><td><a href="http://vision.cse.psu.edu/publications/pdfs/GeCollinsRubackPAMI2011.pdf">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>18%</td><td>151</td><td>27</td><td>124</td><td>9</td><td>78</td><td>2</td></tr><tr><td>dd65f71dac86e36eecbd3ed225d016c3336b4a13</td><td>families_in_the_wild</td><td>FIW</td><td><a href="papers/dd65f71dac86e36eecbd3ed225d016c3336b4a13.html">Visual Kinship Recognition of Families in the Wild</a></td><td><a href="{'url': 'http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8337841', 'linkType': 'ieee'}">[pdf]</a></td><td>IEEE Transactions on Pattern Analysis and Machine Intelligence</td><td></td><td></td><td></td><td></td><td>67%</td><td>3</td><td>2</td><td>1</td><td>0</td><td>2</td><td>0</td></tr><tr><td>52d7eb0fbc3522434c13cc247549f74bb9609c5d</td><td>wider_face</td><td>WIDER FACE</td><td><a href="papers/52d7eb0fbc3522434c13cc247549f74bb9609c5d.html">WIDER FACE: A Face Detection Benchmark</a></td><td><a href="{'url': 'https://arxiv.org/pdf/1511.06523.pdf', 'linkType': 'arxiv'}">[pdf]</a></td><td>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</td><td>edu</td><td>Chinese University of Hong 
Kong</td><td>22.42031295</td><td>114.20788644</td><td>53%</td><td>148</td><td>78</td><td>70</td><td>16</td><td>107</td><td>34</td></tr><tr><td>77c81c13a110a341c140995bedb98101b9e84f7f</td><td>wildtrack</td><td>WildTrack</td><td><a href="papers/77c81c13a110a341c140995bedb98101b9e84f7f.html">WILDTRACK : A Multi-camera HD Dataset for Dense Unscripted Pedestrian Detection</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/fe1c/ec4e4995b8615855572374ae3efc94949105.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>5ad4e9f947c1653c247d418f05dad758a3f9277b</td><td>wlfdb</td><td></td><td><a href="papers/5ad4e9f947c1653c247d418f05dad758a3f9277b.html">WLFDB: Weakly Labeled Face Databases</a></td><td><a href="{'url': 'https://pdfs.semanticscholar.org/5ad4/e9f947c1653c247d418f05dad758a3f9277b.pdf', 'linkType': 's2'}">[pdf]</a></td><td>Unknown</td><td></td><td></td><td></td><td></td><td>0%</td><td>1</td><td>0</td><td>1</td><td>0</td><td>0</td><td>0</td></tr><tr><td>0dc11a37cadda92886c56a6fb5191ded62099c28</td><td>stickmen_family</td><td>We Are Family Stickmen</td><td><a href="papers/0dc11a37cadda92886c56a6fb5191ded62099c28.html">We Are Family: Joint Pose Estimation of Multiple Persons</a></td><td><a href="http://pdfs.semanticscholar.org/0dc1/1a37cadda92886c56a6fb5191ded62099c28.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>44%</td><td>77</td><td>34</td><td>43</td><td>4</td><td>57</td><td>1</td></tr><tr><td>2a75f34663a60ab1b04a0049ed1d14335129e908</td><td>mmi_facial_expression</td><td>MMI Facial Expression Dataset</td><td><a href="papers/2a75f34663a60ab1b04a0049ed1d14335129e908.html">Web-based database for facial expression analysis</a></td><td><a href="http://ibug.doc.ic.ac.uk/media/uploads/documents/PanticEtAl-ICME2005-final.pdf">[pdf]</a></td><td>2005 IEEE International Conference on Multimedia and Expo</td><td></td><td></td><td></td><td></td><td>32%</td><td>440</td><td>142</td><td>298</td><td>44</td><td>258</td><td>82</td></tr><tr><td>9b9bf5e623cb8af7407d2d2d857bc3f1b531c182</td><td>who_goes_there</td><td>WGT</td><td><a href="papers/9b9bf5e623cb8af7407d2d2d857bc3f1b531c182.html">Who goes there?: approaches to mapping facial appearance diversity</a></td><td><a href="http://doi.acm.org/10.1145/2996913.2996997">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>100%</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>a94cae786d515d3450d48267e12ca954aab791c4</td><td>yawdd</td><td>YawDD</td><td><a href="papers/a94cae786d515d3450d48267e12ca954aab791c4.html">YawDD: a yawning detection dataset</a></td><td><a href="http://www.site.uottawa.ca/~shervin/pubs/CogniVue-Dataset-ACM-MMSys2014.pdf">[pdf]</a></td><td></td><td></td><td></td><td></td><td></td><td>0%</td><td>14</td><td>0</td><td>14</td><td>1</td><td>2</td><td>1</td></tr></table></body></html> \ No newline at end of file
diff --git a/scraper/s2-citation-report.py b/scraper/s2-citation-report.py
index e0d812d7..b5849329 100644
--- a/scraper/s2-citation-report.py
+++ b/scraper/s2-citation-report.py
@@ -32,16 +32,73 @@ def s2_citation_report():
print("citations: {}".format(paper_count))
print("geocoded: {} ({}%)".format(geocode_count, percent(geocode_count, paper_count)))
- # fetch_google_sheet
+ write_master_report('{}/{}'.format(DIR_PUBLIC_CITATIONS, "datasets.csv"), papers)
sts = subprocess.call([
- "s3cmd", "sync",
+ "s3cmd", "put", "-P", "--recursive",
DIR_PUBLIC_CITATIONS + '/',
"s3://megapixels/v1/citations/",
])
-def write_master_report(fn, title, papers, key):
- keys, rows = fetch_google_sheet('statistics')
+def write_master_report(fn, papers):
+ # first make a lookup of the keys that have papers
+ paper_key_lookup = {}
+ for paper in papers:
+ if paper['key'] not in paper_key_lookup:
+ paper_key_lookup[paper['key']] = paper
+
+    # then fetch the "statistics" Google Sheet, which has fields like year_published
+ fields, rows = fetch_google_sheet('statistics')
+ master_papers = []
+ statistics = {}
+
+ def clean(n):
+ if n:
+            n = n.replace(',','').replace('.','').replace('?','').strip()
+            return int(n) if n else None
+ return None
+
+ for row in rows:
+ key = row[0]
+ if key not in paper_key_lookup:
+ continue
+ paper = paper_key_lookup[key]
+ stats = {}
+ for index, field in enumerate(fields):
+ stats[field] = row[index]
+ report_fn = '../site/content/datasets/{}/index.md'.format(key)
+ has_report = os.path.exists(report_fn)
+ statistics[key] = stats
+ search_result = read_json('./datasets/s2/entries/{}.json'.format(paper['paperId']))
+
+ image_count = stats['images']
+ if type(image_count) is str:
+ if len(image_count):
+ image_count = clean(image_count)
+ else:
+                image_count = None
+ master_papers.append([
+ stats['key'],
+ stats['name'],
+ '/datasets/{}/'.format(key) if has_report else '',
+ image_count,
+ clean(stats['faces_unique']) or None,
+ stats['year_published'],
+ clean(paper['citation_count']) or 0,
+ clean(search_result['citationStats']['numKeyCitations']) or 0,
+ # origin
+ ])
+ master_paper_keys = [
+ 'key',
+ 'title',
+ 'link',
+ 'images',
+ 'people',
+ 'year',
+ 'citations',
+ 'influenced',
+ # 'origin'
+ ]
+ write_csv(fn, keys=master_paper_keys, rows=master_papers)
def write_papers_report(fn, title, papers, key, reverse=False):
sorted_papers = []
@@ -275,7 +332,7 @@ def process_paper(row, addresses, success):
f.write('<script src="../map.js"></script>')
f.write("</html>")
# template = env.get_template('paper.html')
- with open('{}/{}.json'.format(DIR_PUBLIC_CITATIONS, paper.paper_id), 'w') as f:
+ with open('{}/{}.json'.format(DIR_PUBLIC_CITATIONS, paper.key), 'w') as f:
json.dump({
'id': paper.paper_id,
'paper': res,
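
For orientation, the new write_master_report() above merges per-dataset fields from the "statistics" Google Sheet with the Semantic Scholar citation counts and writes them to datasets.csv. Below is a minimal, self-contained sketch of that row-assembly step: clean() mirrors the helper in the patch, while build_row() and the sample values are hypothetical stand-ins (fetch_google_sheet, read_json and write_csv from scraper/util.py are not reproduced here).

    # Hedged sketch of the datasets.csv row assembly in write_master_report().
    # Field order follows master_paper_keys from the patch; helpers and sample
    # values below are illustrative stand-ins, not the project's util.py code.
    import csv

    def clean(n):
        # strip thousands separators and stray punctuation before int()
        if n:
            n = n.replace(',', '').replace('.', '').replace('?', '').strip()
            return int(n) if n else None
        return None

    def build_row(stats, paper, search_result, has_report):
        key = stats['key']
        return [
            key,
            stats['name'],
            '/datasets/{}/'.format(key) if has_report else '',
            clean(stats['images']),
            clean(stats['faces_unique']) or None,
            stats['year_published'],
            clean(str(paper['citation_count'])) or 0,
            clean(str(search_result['citationStats']['numKeyCitations'])) or 0,
        ]

    if __name__ == '__main__':
        # hypothetical sample values, for illustration only
        row = build_row(
            {'key': 'example_dataset', 'name': 'Example Dataset',
             'images': '13,233', 'faces_unique': '5,749', 'year_published': '2007'},
            {'citation_count': 999},
            {'citationStats': {'numKeyCitations': 96}},
            has_report=True,
        )
        with open('datasets.csv', 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(['key', 'title', 'link', 'images', 'people',
                             'year', 'citations', 'influenced'])
            writer.writerow(row)
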
diff --git a/scraper/s2-search.py b/scraper/s2-search.py
index 9ec20cc9..d9b1beca 100644
--- a/scraper/s2-search.py
+++ b/scraper/s2-search.py
@@ -38,22 +38,24 @@ def fetch_entries(index, refresh):
if len(clean_title) < 2:
continue
dump_fn = './datasets/s2/dumps/{}.json'.format(key)
- entry_fn = './datasets/s2/entries/{}.json'.format(key)
result = None
- if not refresh and os.path.exists(entry_fn):
- result = read_json(entry_fn)
+ if not refresh and os.path.exists(dump_fn):
+ results = read_json(dump_fn)
else:
results = s2.search(clean_title)
write_json(dump_fn, results)
- if len(results['results']) == 0:
- print("- {}".format(title))
- else:
- print("+ {}".format(title))
- result = results['results'][0]
- write_json(entry_fn, result)
+
+ if len(results['results']) == 0:
+ print("- {}".format(title))
+ else:
+ print("+ {}".format(title))
+ result = results['results'][0]
+
if result:
paper_id = result['id']
paper = fetch_paper(s2, paper_id)
+ entry_fn = './datasets/s2/entries/{}.json'.format(paper_id)
+ write_json(entry_fn, result)
citation_lookup.append([key, name, title, paper_id])
write_csv("datasets/citation_lookup.csv", keys=['key', 'name', 'title', 'paper_id'], rows=citation_lookup)
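
The fetch_entries() change above reverses the caching order: the raw Semantic Scholar search dump is now cached per dataset key, and the selected entry is written out keyed by its paper id (which write_master_report reads back via datasets/s2/entries/<paperId>.json). The pattern reduces to roughly the sketch below; read_json/write_json here are minimal stand-ins for the helpers in scraper/util.py, and search_fn stands in for s2.search.

    # Hedged sketch of the dump-first caching pattern used in fetch_entries().
    import json
    import os

    def read_json(fn):
        with open(fn) as f:
            return json.load(f)

    def write_json(fn, data):
        os.makedirs(os.path.dirname(fn), exist_ok=True)
        with open(fn, 'w') as f:
            json.dump(data, f)

    def cached_search(key, clean_title, search_fn, refresh=False):
        dump_fn = './datasets/s2/dumps/{}.json'.format(key)
        if not refresh and os.path.exists(dump_fn):
            results = read_json(dump_fn)      # reuse the cached raw search dump
        else:
            results = search_fn(clean_title)  # hit the search API
            write_json(dump_fn, results)
        if not results['results']:
            return None
        result = results['results'][0]        # take the top search hit
        # entries are keyed by paper id rather than by dataset key
        write_json('./datasets/s2/entries/{}.json'.format(result['id']), result)
        return result
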
diff --git a/scraper/util.py b/scraper/util.py
index c02f018c..47e5a4aa 100644
--- a/scraper/util.py
+++ b/scraper/util.py
@@ -259,7 +259,7 @@ class AddressBook (object):
def __init__(self):
entities = {}
lookup = {}
- keys, data = fetch_google_sheet()
+ keys, data = fetch_google_sheet('institutions')
# keys, data = read_csv('reports/pdf_institutions_deduped.csv', keys=True)
for index, line in enumerate(data):
if line[0] == line[1] or line[0] not in entities:
diff --git a/site/datasets/citations/22f656d0f8426c84a33a267977f511f127bfd7f3.json b/site/datasets/citations/22f656d0f8426c84a33a267977f511f127bfd7f3.json
index e0e664a5..d40093c9 100644
--- a/site/datasets/citations/22f656d0f8426c84a33a267977f511f127bfd7f3.json
+++ b/site/datasets/citations/22f656d0f8426c84a33a267977f511f127bfd7f3.json
@@ -1 +1 @@
-{"id": "22f656d0f8426c84a33a267977f511f127bfd7f3", "paper": {"paperId": "22f656d0f8426c84a33a267977f511f127bfd7f3", "key": "social_relation", "title": "From Facial Expression Recognition to Interpersonal Relation Prediction", "journal": "International Journal of Computer Vision", "address": "", "address_type": "", "lat": "", "lng": "", "pdf_link": "http://arxiv.org/abs/1609.06426", "report_link": "papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html", "citation_count": 9, "citations_geocoded": 4, "citations_unknown": 5, "citations_empty": 0, "citations_pdf": 5, "citations_doi": 1, "name": "Social Relation"}, "address": null, "citations": [["Multiple-Human Parsing in the Wild", "National University of Singapore", "National University of Singapore", "National University of Singapore", "NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore", "1.29620180", "103.77689944", "edu", ""], ["Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing", "Qihoo 360 AI Institute", "National University of Singapore", "National University of Singapore", "NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore", "1.29620180", "103.77689944", "edu", ""], ["MicroExpNet: An Extremely Small and Fast Model For Expression Recognition From Frontal Face Images", "Middle East Technical University", "Middle East Technical University", "Middle East Technical University", "ODT\u00dc, 1, 1591.sk(315.sk), \u00c7i\u011fdem Mahallesi, Ankara, \u00c7ankaya, Ankara, \u0130\u00e7 Anadolu B\u00f6lgesi, 06800, T\u00fcrkiye", "39.87549675", "32.78553506", "edu", ""], ["Group emotion recognition with individual facial emotion CNNs and global image based CNNs", "SIAT at Chinese Academy of Sciences, China", "National Taiwan University", "National Taiwan University", "\u81fa\u5927;\u53f0\u5927, 1, \u7f85\u65af\u798f\u8def\u56db\u6bb5, \u5b78\u5e9c\u91cc, \u5927\u5b89\u5340, \u81fa\u5317\u5e02, 10617, \u81fa\u7063", "25.01682835", "121.53846924", "edu", ""]]} \ No newline at end of file
+{"id": "22f656d0f8426c84a33a267977f511f127bfd7f3", "paper": {"paperId": "22f656d0f8426c84a33a267977f511f127bfd7f3", "key": "expw", "title": "From Facial Expression Recognition to Interpersonal Relation Prediction", "journal": "International Journal of Computer Vision", "address": "", "address_type": "", "lat": "", "lng": "", "pdf_link": "http://arxiv.org/abs/1609.06426", "report_link": "papers/22f656d0f8426c84a33a267977f511f127bfd7f3.html", "citation_count": 9, "citations_geocoded": 4, "citations_unknown": 5, "citations_empty": 0, "citations_pdf": 5, "citations_doi": 1, "name": "ExpW"}, "address": null, "citations": [["Multiple-Human Parsing in the Wild", "National University of Singapore", "National University of Singapore", "National University of Singapore", "NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore", "1.29620180", "103.77689944", "edu", ""], ["Understanding Humans in Crowded Scenes: Deep Nested Adversarial Learning and A New Benchmark for Multi-Human Parsing", "Qihoo 360 AI Institute", "National University of Singapore", "National University of Singapore", "NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore", "1.29620180", "103.77689944", "edu", ""], ["MicroExpNet: An Extremely Small and Fast Model For Expression Recognition From Frontal Face Images", "Middle East Technical University", "Middle East Technical University", "Middle East Technical University", "ODT\u00dc, 1, 1591.sk(315.sk), \u00c7i\u011fdem Mahallesi, Ankara, \u00c7ankaya, Ankara, \u0130\u00e7 Anadolu B\u00f6lgesi, 06800, T\u00fcrkiye", "39.87549675", "32.78553506", "edu", ""], ["Group emotion recognition with individual facial emotion CNNs and global image based CNNs", "SIAT at Chinese Academy of Sciences, China", "National Taiwan University", "National Taiwan University", "\u81fa\u5927;\u53f0\u5927, 1, \u7f85\u65af\u798f\u8def\u56db\u6bb5, \u5b78\u5e9c\u91cc, \u5927\u5b89\u5340, \u81fa\u5317\u5e02, 10617, \u81fa\u7063", "25.01682835", "121.53846924", "edu", ""]]} \ No newline at end of file
diff --git a/site/datasets/citations/27a2fad58dd8727e280f97036e0d2bc55ef5424c.json b/site/datasets/citations/27a2fad58dd8727e280f97036e0d2bc55ef5424c.json
index 4ca3236d..7de56f0a 100644
--- a/site/datasets/citations/27a2fad58dd8727e280f97036e0d2bc55ef5424c.json
+++ b/site/datasets/citations/27a2fad58dd8727e280f97036e0d2bc55ef5424c.json
@@ -1 +1 @@
-{"id": "27a2fad58dd8727e280f97036e0d2bc55ef5424c", "paper": {"paperId": "27a2fad58dd8727e280f97036e0d2bc55ef5424c", "key": "mot", "title": "Performance Measures and a Data Set for Multi-target, Multi-camera Tracking", "journal": "", "address": "", "address_type": "", "lat": "", "lng": "", "pdf_link": "http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf", "report_link": "papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html", "citation_count": 136, "citations_geocoded": 58, "citations_unknown": 78, "citations_empty": 6, "citations_pdf": 107, "citations_doi": 0, "name": "MOT"}, "address": null, "citations": [["Learning Discriminative Features with Multiple Granularities for Person Re-Identification", "", "Shanghai Jiao Tong University", "Shanghai Jiao Tong University", "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\uff08\u5f90\u6c47\u6821\u533a\uff09, \u6dee\u6d77\u897f\u8def, \u756a\u79ba\u5c0f\u533a, \u5e73\u9634\u6865, \u5f90\u6c47\u533a, \u4e0a\u6d77\u5e02, 200052, \u4e2d\u56fd", "31.20081505", "121.42840681", "edu", ""], ["Virtual CNN Branching: Efficient Feature Ensemble for Person Re-Identification", "", "Duke University", "Duke University", "Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA", "35.99905220", "-78.92906290", "edu", ""], ["The Unmanned Aerial Vehicle Benchmark: Object Detection and Tracking", "", "University of Texas at San Antonio", "University of Texas at San Antonio", "UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA", "29.58333105", "-98.61944505", "edu", ""], ["Semantically Selective Augmentation for Deep Compact Person Re-Identification", "", "University of Bristol", "University of Bristol", "Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK", "51.45848370", "-2.60977520", "edu", ""], ["The Devil is in the Middle: Exploiting Mid-level Representations for Cross-Domain Instance Matching", "", "University of Edinburgh", "University of Edinburgh", "New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK", "55.94951105", "-3.19534913", "edu", ""], ["Discriminative Feature Representation for Person Re-identification by Batch-contrastive Loss", "", "East China Normal University", "East China Normal University", "\u534e\u4e1c\u5e08\u8303\u5927\u5b66, 3663, \u4e2d\u5c71\u5317\u8def, \u66f9\u5bb6\u6e21, \u666e\u9640\u533a, \u666e\u9640\u533a (Putuo), \u4e0a\u6d77\u5e02, 200062, \u4e2d\u56fd", "31.22849230", "121.40211389", "edu", ""], ["Image-Image Domain Adaptation with Preserved Self-Similarity and Domain-Dissimilarity for Person Re-identification", "", "University of Technology Sydney", "University of Technology Sydney", "University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia", "-33.88096510", "151.20107299", "edu", ""], ["Person Re-identification with Deep Similarity-Guided Graph Neural Network", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["Robust Pedestrian Detection for Semi-automatic Construction of a Crowded Person Re-Identification Dataset", "", "Jiangnan University", "Jiangnan University", "\u6c5f\u5357\u5927\u5b66\u7ad9, 
\u8821\u6e56\u5927\u9053, \u6ee8\u6e56\u533a, \u5357\u573a\u6751, \u6ee8\u6e56\u533a (Binhu), \u65e0\u9521\u5e02 / Wuxi, \u6c5f\u82cf\u7701, 214121, \u4e2d\u56fd", "31.48542550", "120.27395810", "edu", ""], ["STA: Spatial-Temporal Attention for Large-Scale Video-based Person Re-Identification", "", "University of Illinois, Urbana-Champaign", "University of Illinois, Urbana-Champaign", "B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA", "40.11116745", "-88.22587665", "edu", ""], ["Human Semantic Parsing for Person Re-identification", "", "Istanbul Technical University", "Istanbul Technical University", "Istanbul Technical University, walking path from main road to Simit restaurant, \u0130stanbul Teknik \u00dcniversitesi, Maslak, F.S.M Mahallesi, Sar\u0131yer, \u0130stanbul, Marmara B\u00f6lgesi, 34469, T\u00fcrkiye", "41.10427915", "29.02231159", "edu", ""], ["SVDNet for Pedestrian Retrieval", "", "University of Technology Sydney", "University of Technology Sydney", "University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia", "-33.88096510", "151.20107299", "edu", ""], ["Multi-object Tracking with Neural Gating Using Bilinear LSTM", "", "Oregon State University", "Oregon State University", "OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA", "45.51982890", "-122.67797964", "edu", ""], ["MaskReID: A Mask Based Deep Ranking Neural Network for Person Re-identification", "", "University of Wollongong", "University of Wollongong", "University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia", "-34.40505545", "150.87834655", "edu", ""], ["Let Features Decide for Themselves: Feature Mask Network for Person Re-identification", "", "Nanjing University", "Nanjing University", "NJU, \u4e09\u6c5f\u8def, \u9f13\u697c\u533a, \u5357\u4eac\u5e02, \u6c5f\u82cf\u7701, 210093, \u4e2d\u56fd", "32.05659570", "118.77408833", "edu", ""], ["Exploit the Unknown Gradually : One-Shot Video-Based Person Re-Identification by Stepwise Learning", "", "University of Sydney", "University of Sydney", "USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia", "-33.88890695", "151.18943366", "edu", ""], ["Person Re-identification by Mid-level Attribute and Part-based Identity Learning", "", "East China Normal University", "East China Normal University", "\u534e\u4e1c\u5e08\u8303\u5927\u5b66, 3663, \u4e2d\u5c71\u5317\u8def, \u66f9\u5bb6\u6e21, \u666e\u9640\u533a, \u666e\u9640\u533a (Putuo), \u4e0a\u6d77\u5e02, 200062, \u4e2d\u56fd", "31.22849230", "121.40211389", "edu", ""], ["NCA-Net for Tracking Multiple Objects across Multiple Cameras", "", "Huazhong University of Science and Technology", "Huazhong University of Science and Technology", "\u534e\u4e2d\u5927, \u73de\u55bb\u8def, \u4e1c\u6e56\u65b0\u6280\u672f\u5f00\u53d1\u533a, \u5173\u4e1c\u8857\u9053, \u4e1c\u6e56\u65b0\u6280\u672f\u5f00\u53d1\u533a\uff08\u6258\u7ba1\uff09, \u6d2a\u5c71\u533a (Hongshan), \u6b66\u6c49\u5e02, \u6e56\u5317\u7701, 430074, \u4e2d\u56fd", "30.50975370", "114.40628810", "edu", ""], ["A Pose-Sensitive Embedding for Person Re-Identification with Expanded Cross Neighborhood Re-Ranking", "", "Karlsruhe Institute of Technology", "Karlsruhe Institute of Technology", "KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-W\u00fcrttemberg, 76351, Deutschland", "49.10184375", "8.43312560", "edu", ""], ["Sparse Label Smoothing for Semi-supervised Person 
Re-Identification", "", "University of Electronic Science and Technology of China", "University of Electronic Science and Technology of China", "Columbus, OH 43210, USA", "40.01419050", "-83.03091430", "edu", ""], ["Unsupervised Person Re-identification by Deep Learning Tracklet Association", "", "Queen Mary University of London", "Queen Mary University of London", "Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK", "51.52472720", "-0.03931035", "edu", ""], ["Sequential Attend, Infer, Repeat: Generative Modelling of Moving Objects", "", "University of Oxford", "University of Oxford", "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "51.75345380", "-1.25400997", "edu", ""], ["Improved Person Re-Identification Based on Saliency and Semantic Parsing with Deep Neural Network Models", "", "University of Campinas", "University of Campinas", "USJ, 97, Rua S\u00edlvia Maria Fabro, Kobrasol, Campinas, S\u00e3o Jos\u00e9, Microrregi\u00e3o de Florian\u00f3polis, Mesorregi\u00e3o da Grande Florian\u00f3polis, SC, Regi\u00e3o Sul, 88102-130, Brasil", "-27.59539950", "-48.61542180", "edu", ""], ["SCPNet: Spatial-Channel Parallelism Network for Joint Holistic and Partial Person Re-Identification", "", "Chinese Academy of Sciences", "Chinese Academy of Sciences", "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "40.00447950", "116.37023800", "edu", ""], ["Improving Person Re-identification by Attribute and Identity Learning", "University of Technology Sydney", "University of Technology Sydney", "University of Technology Sydney", "University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia", "-33.88096510", "151.20107299", "edu", ""], ["Deep Group-shuffling Random Walk for Person Re-identification", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["Part-Aligned Bilinear Representations for Person Re-identification", "", "Microsoft Research Asia", "Microsoft Live Labs Research, China", "Microsoft Research Asia", "35.86166000", "104.19539700", "company", ""], ["Weighted Bilinear Coding over Salient Body Parts for Person Re-identification", "", "Tsinghua University", "Tsinghua University", "\u6e05\u534e\u5927\u5b66, 30, \u53cc\u6e05\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "40.00229045", "116.32098908", "edu", ""], ["Features for Multi-Target Multi-Camera Tracking and Re-Identification", "", "Duke University", "Duke University", "Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA", "35.99905220", "-78.92906290", "edu", ""], ["Occluded Person Re-Identification", "", "Sun Yat-Sen University", "Sun Yat-Sen University", "\u4e2d\u5927, \u65b0\u6e2f\u897f\u8def, \u9f99\u8239\u6ed8, \u5eb7\u4e50, \u6d77\u73e0\u533a (Haizhu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510105, \u4e2d\u56fd", "23.09461185", "113.28788994", "edu", ""], ["Person Re-identification with Cascaded Pairwise Convolutions", "", "University of Science and Technology of China", "University 
of Science and Technology of China", "\u4e2d\u56fd\u79d1\u5b66\u6280\u672f\u5927\u5b66 \u4e1c\u6821\u533a, 96\u53f7, \u91d1\u5be8\u8def, \u6c5f\u6dee\u5316\u80a5\u5382\u5c0f\u533a, \u829c\u6e56\u8def\u8857\u9053, \u5408\u80a5\u5e02\u533a, \u5408\u80a5\u5e02, \u5b89\u5fbd\u7701, 230026, \u4e2d\u56fd", "31.83907195", "117.26420748", "edu", ""], ["Scaling Video Analytics Systems to Large Camera Deployments", "", "Microsoft Research Asia", "Microsoft Live Labs Research, China", "Microsoft Research Asia", "35.86166000", "104.19539700", "company", ""], ["Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro", "", "University of Technology Sydney", "University of Technology Sydney", "University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia", "-33.88096510", "151.20107299", "edu", ""], ["ReXCam: Resource-Efficient, Cross-Camera Video Analytics at Enterprise Scale", "", "Microsoft Research Asia", "Microsoft Live Labs Research, China", "Microsoft Research Asia", "35.86166000", "104.19539700", "company", ""], ["Pose Transferrable Person Re-Identification", "", "Shanghai Jiao Tong University", "Shanghai Jiao Tong University", "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\uff08\u5f90\u6c47\u6821\u533a\uff09, \u6dee\u6d77\u897f\u8def, \u756a\u79ba\u5c0f\u533a, \u5e73\u9634\u6865, \u5f90\u6c47\u533a, \u4e0a\u6d77\u5e02, 200052, \u4e2d\u56fd", "31.20081505", "121.42840681", "edu", ""], ["Horizontal Pyramid Matching for Person Re-identification", "", "University of Illinois, Urbana-Champaign", "University of Illinois, Urbana-Champaign", "B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA", "40.11116745", "-88.22587665", "edu", ""], ["Generalizing a Person Retrieval Model Hetero- and Homogeneously", "", "Australian National University", "Australian National University", "Canberra ACT 0200, Australia", "-35.27769990", "149.11852700", "edu", ""], ["FD-GAN: Pose-guided Feature Distilling GAN for Robust Person Re-identification", "", "University of Science and Technology of China", "University of Science and Technology of China", "\u4e2d\u56fd\u79d1\u5b66\u6280\u672f\u5927\u5b66 \u4e1c\u6821\u533a, 96\u53f7, \u91d1\u5be8\u8def, \u6c5f\u6dee\u5316\u80a5\u5382\u5c0f\u533a, \u829c\u6e56\u8def\u8857\u9053, \u5408\u80a5\u5e02\u533a, \u5408\u80a5\u5e02, \u5b89\u5fbd\u7701, 230026, \u4e2d\u56fd", "31.83907195", "117.26420748", "edu", ""], ["Attention in Multimodal Neural Networks for Person Re-identification", "", "Aalborg University", "Aalborg University", "AAU, Pontoppidanstr\u00e6de, S\u00f8nder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark", "57.01590275", "9.97532827", "edu", ""], ["SphereReID: Deep Hypersphere Manifold Embedding for Person Re-Identification", "", "Zhejiang University", "Zhejiang University", "\u6d59\u6c5f\u5927\u5b66\u4e4b\u6c5f\u6821\u533a, \u4e4b\u6c5f\u8def, \u8f6c\u5858\u8857\u9053, \u897f\u6e56\u533a (Xihu), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, 310008, \u4e2d\u56fd", "30.19331415", "120.11930822", "edu", ""], ["Harmonious Attention Network for Person Re-Identification", "", "Queen Mary University of London", "Queen Mary University of London", "Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK", "51.52472720", "-0.03931035", "edu", ""], ["Towards a Principled Integration of Multi-camera Re-identification and Tracking Through Optimal Bayes Filters", "", "RWTH Aachen University", 
"RWTH Aachen University", "RWTH Aachen, Mies-van-der-Rohe-Stra\u00dfe, K\u00f6nigsh\u00fcgel, Aachen-Mitte, Aachen, St\u00e4dteregion Aachen, Regierungsbezirk K\u00f6ln, Nordrhein-Westfalen, 52074, Deutschland", "50.77917030", "6.06728733", "edu", ""], ["Dual Attention Matching Network for Context-Aware Feature Sequence based Person Re-Identification", "", "Nanyang Technological University", "Nanyang Technological University", "NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore", "1.34841040", "103.68297965", "edu", ""], ["Fusion of Head and Full-Body Detectors for Multi-Object Tracking", "", "Technical University Munich", "Technical University Munich", "TUM, 21, Arcisstra\u00dfe, Bezirksteil K\u00f6nigsplatz, Stadtbezirk 03 Maxvorstadt, M\u00fcnchen, Obb, Bayern, 80333, Deutschland", "48.14955455", "11.56775314", "edu", ""], ["A Dataset for Persistent Multi-target Multi-camera Tracking in RGB-D", "", "Queen Mary University of London", "Queen Mary University of London", "Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK", "51.52472720", "-0.03931035", "edu", ""], ["Multi-Level Factorisation Net for Person Re-Identification", "", "University of Edinburgh", "University of Edinburgh", "New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK", "55.94951105", "-3.19534913", "edu", ""], ["End-to-End Deep Kronecker-Product Matching for Person Re-identification", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["Group Consistent Similarity Learning via Deep CRF for Person Re-Identification", "", "University of Trento", "University of Trento", "University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia", "46.06588360", "11.11598940", "edu", ""], ["Trajectory Factory: Tracklet Cleaving and Re-Connection by Deep Siamese Bi-GRU for Multiple Object Tracking", "", "Peking University", "Peking University", "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "39.99223790", "116.30393816", "edu", ""], ["Self Attention Grid for Person Re-Identification", "", "University of Electronic Science and Technology of China", "University of Electronic Science and Technology of China", "Columbus, OH 43210, USA", "40.01419050", "-83.03091430", "edu", ""], ["Multi-task Mid-level Feature Alignment Network for Unsupervised Cross-Dataset Person Re-Identification", "", "Charles Sturt University", "Charles Sturt University", "Charles Sturt University, Wagga Wagga, NSW, 2678, Australia", "-35.06360710", "147.35522340", "edu", ""], ["Person Transfer GAN to Bridge Domain Gap for Person Re-Identification", "", "University of Texas at San Antonio", "University of Texas at San Antonio", "UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA", "29.58333105", "-98.61944505", "edu", ""], ["Real-Time Multiple People Tracking with Deeply Learned Candidate Selection and Person Re-Identification", "", "Tsinghua University", "Tsinghua University", "\u6e05\u534e\u5927\u5b66, 30, \u53cc\u6e05\u8def, \u4e94\u9053\u53e3, 
\u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "40.00229045", "116.32098908", "edu", ""], ["Random Erasing Data Augmentation", "", "Xiamen University", "Xiamen University", "\u53a6\u95e8\u5927\u5b66, \u601d\u660e\u5357\u8def Siming South Road, \u601d\u660e\u533a, \u601d\u660e\u533a (Siming), \u53a6\u95e8\u5e02 / Xiamen, \u798f\u5efa\u7701, 361005, \u4e2d\u56fd", "24.43994190", "118.09301781", "edu", ""], ["In Defense of the Classification Loss for Person Re-Identification", "", "University of Science and Technology of China", "University of Science and Technology of China", "\u4e2d\u56fd\u79d1\u5b66\u6280\u672f\u5927\u5b66 \u4e1c\u6821\u533a, 96\u53f7, \u91d1\u5be8\u8def, \u6c5f\u6dee\u5316\u80a5\u5382\u5c0f\u533a, \u829c\u6e56\u8def\u8857\u9053, \u5408\u80a5\u5e02\u533a, \u5408\u80a5\u5e02, \u5b89\u5fbd\u7701, 230026, \u4e2d\u56fd", "31.83907195", "117.26420748", "edu", ""], ["Resource Aware Person Re-identification across Multiple Resolutions", "", "Tsinghua University", "Tsinghua University", "\u6e05\u534e\u5927\u5b66, 30, \u53cc\u6e05\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "40.00229045", "116.32098908", "edu", ""], ["Video Person Re-identification with Competitive Snippet-similarity Aggregation and Co-attentive Snippet Embedding", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["Multi-Object Tracking with Correlation Filter for Autonomous Vehicle", "", "National University of Defense Technology, China", "National University of Defence Technology, Changsha 410000, China", "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "28.22902090", "112.99483204", "edu", ""]]} \ No newline at end of file
+{"id": "27a2fad58dd8727e280f97036e0d2bc55ef5424c", "paper": {"paperId": "27a2fad58dd8727e280f97036e0d2bc55ef5424c", "key": "duke_mtmc", "title": "Performance Measures and a Data Set for Multi-target, Multi-camera Tracking", "journal": "", "address": "", "address_type": "", "lat": "", "lng": "", "pdf_link": "http://pdfs.semanticscholar.org/b5f2/4f49f9a5e47d6601399dc068158ad88d7651.pdf", "report_link": "papers/27a2fad58dd8727e280f97036e0d2bc55ef5424c.html", "citation_count": 136, "citations_geocoded": 58, "citations_unknown": 78, "citations_empty": 6, "citations_pdf": 107, "citations_doi": 0, "name": "Duke MTMC"}, "address": null, "citations": [["Learning Discriminative Features with Multiple Granularities for Person Re-Identification", "", "Shanghai Jiao Tong University", "Shanghai Jiao Tong University", "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\uff08\u5f90\u6c47\u6821\u533a\uff09, \u6dee\u6d77\u897f\u8def, \u756a\u79ba\u5c0f\u533a, \u5e73\u9634\u6865, \u5f90\u6c47\u533a, \u4e0a\u6d77\u5e02, 200052, \u4e2d\u56fd", "31.20081505", "121.42840681", "edu", ""], ["Virtual CNN Branching: Efficient Feature Ensemble for Person Re-Identification", "", "Duke University", "Duke University", "Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA", "35.99905220", "-78.92906290", "edu", ""], ["The Unmanned Aerial Vehicle Benchmark: Object Detection and Tracking", "", "University of Texas at San Antonio", "University of Texas at San Antonio", "UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA", "29.58333105", "-98.61944505", "edu", ""], ["Semantically Selective Augmentation for Deep Compact Person Re-Identification", "", "University of Bristol", "University of Bristol", "Victoria Rooms, Whiteladies Road, Cliftonwood, Spike Island, Bristol, City of Bristol, South West England, England, BS8 2PY, UK", "51.45848370", "-2.60977520", "edu", ""], ["The Devil is in the Middle: Exploiting Mid-level Representations for Cross-Domain Instance Matching", "", "University of Edinburgh", "University of Edinburgh", "New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK", "55.94951105", "-3.19534913", "edu", ""], ["Discriminative Feature Representation for Person Re-identification by Batch-contrastive Loss", "", "East China Normal University", "East China Normal University", "\u534e\u4e1c\u5e08\u8303\u5927\u5b66, 3663, \u4e2d\u5c71\u5317\u8def, \u66f9\u5bb6\u6e21, \u666e\u9640\u533a, \u666e\u9640\u533a (Putuo), \u4e0a\u6d77\u5e02, 200062, \u4e2d\u56fd", "31.22849230", "121.40211389", "edu", ""], ["Image-Image Domain Adaptation with Preserved Self-Similarity and Domain-Dissimilarity for Person Re-identification", "", "University of Technology Sydney", "University of Technology Sydney", "University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia", "-33.88096510", "151.20107299", "edu", ""], ["Person Re-identification with Deep Similarity-Guided Graph Neural Network", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["Robust Pedestrian Detection for Semi-automatic Construction of a Crowded Person Re-Identification Dataset", "", "Jiangnan University", "Jiangnan University", 
"\u6c5f\u5357\u5927\u5b66\u7ad9, \u8821\u6e56\u5927\u9053, \u6ee8\u6e56\u533a, \u5357\u573a\u6751, \u6ee8\u6e56\u533a (Binhu), \u65e0\u9521\u5e02 / Wuxi, \u6c5f\u82cf\u7701, 214121, \u4e2d\u56fd", "31.48542550", "120.27395810", "edu", ""], ["STA: Spatial-Temporal Attention for Large-Scale Video-based Person Re-Identification", "", "University of Illinois, Urbana-Champaign", "University of Illinois, Urbana-Champaign", "B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA", "40.11116745", "-88.22587665", "edu", ""], ["Human Semantic Parsing for Person Re-identification", "", "Istanbul Technical University", "Istanbul Technical University", "Istanbul Technical University, walking path from main road to Simit restaurant, \u0130stanbul Teknik \u00dcniversitesi, Maslak, F.S.M Mahallesi, Sar\u0131yer, \u0130stanbul, Marmara B\u00f6lgesi, 34469, T\u00fcrkiye", "41.10427915", "29.02231159", "edu", ""], ["SVDNet for Pedestrian Retrieval", "", "University of Technology Sydney", "University of Technology Sydney", "University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia", "-33.88096510", "151.20107299", "edu", ""], ["Multi-object Tracking with Neural Gating Using Bilinear LSTM", "", "Oregon State University", "Oregon State University", "OSU Beaver Store, 538, Southwest 6th Avenue, Portland Downtown, Portland, Multnomah County, Oregon, 97204, USA", "45.51982890", "-122.67797964", "edu", ""], ["MaskReID: A Mask Based Deep Ranking Neural Network for Person Re-identification", "", "University of Wollongong", "University of Wollongong", "University of Wollongong, Admin Road, Keiraville, Wollongong, NSW, 2500, Australia", "-34.40505545", "150.87834655", "edu", ""], ["Let Features Decide for Themselves: Feature Mask Network for Person Re-identification", "", "Nanjing University", "Nanjing University", "NJU, \u4e09\u6c5f\u8def, \u9f13\u697c\u533a, \u5357\u4eac\u5e02, \u6c5f\u82cf\u7701, 210093, \u4e2d\u56fd", "32.05659570", "118.77408833", "edu", ""], ["Exploit the Unknown Gradually : One-Shot Video-Based Person Re-Identification by Stepwise Learning", "", "University of Sydney", "University of Sydney", "USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia", "-33.88890695", "151.18943366", "edu", ""], ["Person Re-identification by Mid-level Attribute and Part-based Identity Learning", "", "East China Normal University", "East China Normal University", "\u534e\u4e1c\u5e08\u8303\u5927\u5b66, 3663, \u4e2d\u5c71\u5317\u8def, \u66f9\u5bb6\u6e21, \u666e\u9640\u533a, \u666e\u9640\u533a (Putuo), \u4e0a\u6d77\u5e02, 200062, \u4e2d\u56fd", "31.22849230", "121.40211389", "edu", ""], ["NCA-Net for Tracking Multiple Objects across Multiple Cameras", "", "Huazhong University of Science and Technology", "Huazhong University of Science and Technology", "\u534e\u4e2d\u5927, \u73de\u55bb\u8def, \u4e1c\u6e56\u65b0\u6280\u672f\u5f00\u53d1\u533a, \u5173\u4e1c\u8857\u9053, \u4e1c\u6e56\u65b0\u6280\u672f\u5f00\u53d1\u533a\uff08\u6258\u7ba1\uff09, \u6d2a\u5c71\u533a (Hongshan), \u6b66\u6c49\u5e02, \u6e56\u5317\u7701, 430074, \u4e2d\u56fd", "30.50975370", "114.40628810", "edu", ""], ["A Pose-Sensitive Embedding for Person Re-Identification with Expanded Cross Neighborhood Re-Ranking", "", "Karlsruhe Institute of Technology", "Karlsruhe Institute of Technology", "KIT, Leopoldshafener Allee, Linkenheim, Linkenheim-Hochstetten, Landkreis Karlsruhe, Regierungsbezirk Karlsruhe, Baden-W\u00fcrttemberg, 76351, Deutschland", "49.10184375", "8.43312560", "edu", ""], ["Sparse Label 
Smoothing for Semi-supervised Person Re-Identification", "", "University of Electronic Science and Technology of China", "University of Electronic Science and Technology of China", "Columbus, OH 43210, USA", "40.01419050", "-83.03091430", "edu", ""], ["Unsupervised Person Re-identification by Deep Learning Tracklet Association", "", "Queen Mary University of London", "Queen Mary University of London", "Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK", "51.52472720", "-0.03931035", "edu", ""], ["Sequential Attend, Infer, Repeat: Generative Modelling of Moving Objects", "", "University of Oxford", "University of Oxford", "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "51.75345380", "-1.25400997", "edu", ""], ["Improved Person Re-Identification Based on Saliency and Semantic Parsing with Deep Neural Network Models", "", "University of Campinas", "University of Campinas", "USJ, 97, Rua S\u00edlvia Maria Fabro, Kobrasol, Campinas, S\u00e3o Jos\u00e9, Microrregi\u00e3o de Florian\u00f3polis, Mesorregi\u00e3o da Grande Florian\u00f3polis, SC, Regi\u00e3o Sul, 88102-130, Brasil", "-27.59539950", "-48.61542180", "edu", ""], ["SCPNet: Spatial-Channel Parallelism Network for Joint Holistic and Partial Person Re-Identification", "", "Chinese Academy of Sciences", "Chinese Academy of Sciences", "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "40.00447950", "116.37023800", "edu", ""], ["Improving Person Re-identification by Attribute and Identity Learning", "University of Technology Sydney", "University of Technology Sydney", "University of Technology Sydney", "University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia", "-33.88096510", "151.20107299", "edu", ""], ["Deep Group-shuffling Random Walk for Person Re-identification", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["Part-Aligned Bilinear Representations for Person Re-identification", "", "Microsoft Research Asia", "Microsoft Live Labs Research, China", "Microsoft Research Asia", "35.86166000", "104.19539700", "company", ""], ["Weighted Bilinear Coding over Salient Body Parts for Person Re-identification", "", "Tsinghua University", "Tsinghua University", "\u6e05\u534e\u5927\u5b66, 30, \u53cc\u6e05\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "40.00229045", "116.32098908", "edu", ""], ["Features for Multi-Target Multi-Camera Tracking and Re-Identification", "", "Duke University", "Duke University", "Nasher Museum of Art, 2001, Campus Drive, Burch Avenue, Durham, Durham County, North Carolina, 27705, USA", "35.99905220", "-78.92906290", "edu", ""], ["Occluded Person Re-Identification", "", "Sun Yat-Sen University", "Sun Yat-Sen University", "\u4e2d\u5927, \u65b0\u6e2f\u897f\u8def, \u9f99\u8239\u6ed8, \u5eb7\u4e50, \u6d77\u73e0\u533a (Haizhu), \u5e7f\u5dde\u5e02, \u5e7f\u4e1c\u7701, 510105, \u4e2d\u56fd", "23.09461185", "113.28788994", "edu", ""], ["Person Re-identification with Cascaded Pairwise Convolutions", "", "University of Science 
and Technology of China", "University of Science and Technology of China", "\u4e2d\u56fd\u79d1\u5b66\u6280\u672f\u5927\u5b66 \u4e1c\u6821\u533a, 96\u53f7, \u91d1\u5be8\u8def, \u6c5f\u6dee\u5316\u80a5\u5382\u5c0f\u533a, \u829c\u6e56\u8def\u8857\u9053, \u5408\u80a5\u5e02\u533a, \u5408\u80a5\u5e02, \u5b89\u5fbd\u7701, 230026, \u4e2d\u56fd", "31.83907195", "117.26420748", "edu", ""], ["Scaling Video Analytics Systems to Large Camera Deployments", "", "Microsoft Research Asia", "Microsoft Live Labs Research, China", "Microsoft Research Asia", "35.86166000", "104.19539700", "company", ""], ["Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in Vitro", "", "University of Technology Sydney", "University of Technology Sydney", "University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia", "-33.88096510", "151.20107299", "edu", ""], ["ReXCam: Resource-Efficient, Cross-Camera Video Analytics at Enterprise Scale", "", "Microsoft Research Asia", "Microsoft Live Labs Research, China", "Microsoft Research Asia", "35.86166000", "104.19539700", "company", ""], ["Pose Transferrable Person Re-Identification", "", "Shanghai Jiao Tong University", "Shanghai Jiao Tong University", "\u4e0a\u6d77\u4ea4\u901a\u5927\u5b66\uff08\u5f90\u6c47\u6821\u533a\uff09, \u6dee\u6d77\u897f\u8def, \u756a\u79ba\u5c0f\u533a, \u5e73\u9634\u6865, \u5f90\u6c47\u533a, \u4e0a\u6d77\u5e02, 200052, \u4e2d\u56fd", "31.20081505", "121.42840681", "edu", ""], ["Horizontal Pyramid Matching for Person Re-identification", "", "University of Illinois, Urbana-Champaign", "University of Illinois, Urbana-Champaign", "B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA", "40.11116745", "-88.22587665", "edu", ""], ["Generalizing a Person Retrieval Model Hetero- and Homogeneously", "", "Australian National University", "Australian National University", "Canberra ACT 0200, Australia", "-35.27769990", "149.11852700", "edu", ""], ["FD-GAN: Pose-guided Feature Distilling GAN for Robust Person Re-identification", "", "University of Science and Technology of China", "University of Science and Technology of China", "\u4e2d\u56fd\u79d1\u5b66\u6280\u672f\u5927\u5b66 \u4e1c\u6821\u533a, 96\u53f7, \u91d1\u5be8\u8def, \u6c5f\u6dee\u5316\u80a5\u5382\u5c0f\u533a, \u829c\u6e56\u8def\u8857\u9053, \u5408\u80a5\u5e02\u533a, \u5408\u80a5\u5e02, \u5b89\u5fbd\u7701, 230026, \u4e2d\u56fd", "31.83907195", "117.26420748", "edu", ""], ["Attention in Multimodal Neural Networks for Person Re-identification", "", "Aalborg University", "Aalborg University", "AAU, Pontoppidanstr\u00e6de, S\u00f8nder Tranders, Aalborg, Aalborg Kommune, Region Nordjylland, 9220, Danmark", "57.01590275", "9.97532827", "edu", ""], ["SphereReID: Deep Hypersphere Manifold Embedding for Person Re-Identification", "", "Zhejiang University", "Zhejiang University", "\u6d59\u6c5f\u5927\u5b66\u4e4b\u6c5f\u6821\u533a, \u4e4b\u6c5f\u8def, \u8f6c\u5858\u8857\u9053, \u897f\u6e56\u533a (Xihu), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, 310008, \u4e2d\u56fd", "30.19331415", "120.11930822", "edu", ""], ["Harmonious Attention Network for Person Re-Identification", "", "Queen Mary University of London", "Queen Mary University of London", "Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK", "51.52472720", "-0.03931035", "edu", ""], ["Towards a Principled Integration of Multi-camera Re-identification and Tracking Through Optimal Bayes 
Filters", "", "RWTH Aachen University", "RWTH Aachen University", "RWTH Aachen, Mies-van-der-Rohe-Stra\u00dfe, K\u00f6nigsh\u00fcgel, Aachen-Mitte, Aachen, St\u00e4dteregion Aachen, Regierungsbezirk K\u00f6ln, Nordrhein-Westfalen, 52074, Deutschland", "50.77917030", "6.06728733", "edu", ""], ["Dual Attention Matching Network for Context-Aware Feature Sequence based Person Re-Identification", "", "Nanyang Technological University", "Nanyang Technological University", "NTU, Faculty Avenue, Jurong West, Southwest, 637460, Singapore", "1.34841040", "103.68297965", "edu", ""], ["Fusion of Head and Full-Body Detectors for Multi-Object Tracking", "", "Technical University Munich", "Technical University Munich", "TUM, 21, Arcisstra\u00dfe, Bezirksteil K\u00f6nigsplatz, Stadtbezirk 03 Maxvorstadt, M\u00fcnchen, Obb, Bayern, 80333, Deutschland", "48.14955455", "11.56775314", "edu", ""], ["A Dataset for Persistent Multi-target Multi-camera Tracking in RGB-D", "", "Queen Mary University of London", "Queen Mary University of London", "Queen Mary (University of London), Mile End Road, Globe Town, Mile End, London Borough of Tower Hamlets, London, Greater London, England, E1 4NS, UK", "51.52472720", "-0.03931035", "edu", ""], ["Multi-Level Factorisation Net for Person Re-Identification", "", "University of Edinburgh", "University of Edinburgh", "New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK", "55.94951105", "-3.19534913", "edu", ""], ["End-to-End Deep Kronecker-Product Matching for Person Re-identification", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["Group Consistent Similarity Learning via Deep CRF for Person Re-Identification", "", "University of Trento", "University of Trento", "University of Trento, Via Giuseppe Verdi, Piedicastello, Trento, Territorio Val d'Adige, TN, TAA, 38122, Italia", "46.06588360", "11.11598940", "edu", ""], ["Trajectory Factory: Tracklet Cleaving and Re-Connection by Deep Siamese Bi-GRU for Multiple Object Tracking", "", "Peking University", "Peking University", "\u5317\u4eac\u5927\u5b66, 5\u53f7, \u9890\u548c\u56ed\u8def, \u7a3b\u9999\u56ed\u5357\u793e\u533a, \u6d77\u6dc0\u533a, \u5317\u4eac\u5e02, 100871, \u4e2d\u56fd", "39.99223790", "116.30393816", "edu", ""], ["Self Attention Grid for Person Re-Identification", "", "University of Electronic Science and Technology of China", "University of Electronic Science and Technology of China", "Columbus, OH 43210, USA", "40.01419050", "-83.03091430", "edu", ""], ["Multi-task Mid-level Feature Alignment Network for Unsupervised Cross-Dataset Person Re-Identification", "", "Charles Sturt University", "Charles Sturt University", "Charles Sturt University, Wagga Wagga, NSW, 2678, Australia", "-35.06360710", "147.35522340", "edu", ""], ["Person Transfer GAN to Bridge Domain Gap for Person Re-Identification", "", "University of Texas at San Antonio", "University of Texas at San Antonio", "UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA", "29.58333105", "-98.61944505", "edu", ""], ["Real-Time Multiple People Tracking with Deeply Learned Candidate Selection and Person Re-Identification", "", "Tsinghua University", "Tsinghua University", "\u6e05\u534e\u5927\u5b66, 30, 
\u53cc\u6e05\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "40.00229045", "116.32098908", "edu", ""], ["Random Erasing Data Augmentation", "", "Xiamen University", "Xiamen University", "\u53a6\u95e8\u5927\u5b66, \u601d\u660e\u5357\u8def Siming South Road, \u601d\u660e\u533a, \u601d\u660e\u533a (Siming), \u53a6\u95e8\u5e02 / Xiamen, \u798f\u5efa\u7701, 361005, \u4e2d\u56fd", "24.43994190", "118.09301781", "edu", ""], ["In Defense of the Classification Loss for Person Re-Identification", "", "University of Science and Technology of China", "University of Science and Technology of China", "\u4e2d\u56fd\u79d1\u5b66\u6280\u672f\u5927\u5b66 \u4e1c\u6821\u533a, 96\u53f7, \u91d1\u5be8\u8def, \u6c5f\u6dee\u5316\u80a5\u5382\u5c0f\u533a, \u829c\u6e56\u8def\u8857\u9053, \u5408\u80a5\u5e02\u533a, \u5408\u80a5\u5e02, \u5b89\u5fbd\u7701, 230026, \u4e2d\u56fd", "31.83907195", "117.26420748", "edu", ""], ["Resource Aware Person Re-identification across Multiple Resolutions", "", "Tsinghua University", "Tsinghua University", "\u6e05\u534e\u5927\u5b66, 30, \u53cc\u6e05\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "40.00229045", "116.32098908", "edu", ""], ["Video Person Re-identification with Competitive Snippet-similarity Aggregation and Co-attentive Snippet Embedding", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["Multi-Object Tracking with Correlation Filter for Autonomous Vehicle", "", "National University of Defense Technology, China", "National University of Defence Technology, Changsha 410000, China", "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "28.22902090", "112.99483204", "edu", ""]]} \ No newline at end of file
diff --git a/site/datasets/citations/37d6f0eb074d207b53885bd2eb78ccc8a04be597.json b/site/datasets/citations/37d6f0eb074d207b53885bd2eb78ccc8a04be597.json
index f124b20e..a75fe9b3 100644
--- a/site/datasets/citations/37d6f0eb074d207b53885bd2eb78ccc8a04be597.json
+++ b/site/datasets/citations/37d6f0eb074d207b53885bd2eb78ccc8a04be597.json
@@ -1 +1 @@
-{"id": "37d6f0eb074d207b53885bd2eb78ccc8a04be597", "paper": {"paperId": "37d6f0eb074d207b53885bd2eb78ccc8a04be597", "key": "youtube_makeup", "title": "Can facial cosmetics affect the matching accuracy of face recognition systems?", "journal": "2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)", "address": "", "address_type": "", "lat": "", "lng": "", "pdf_link": "http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf", "report_link": "papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html", "citation_count": 49, "citations_geocoded": 24, "citations_unknown": 25, "citations_empty": 0, "citations_pdf": 18, "citations_doi": 22, "name": "YMU"}, "address": null, "citations": [["The Potential of Using Brain Images for Authentication", "College of Mechatronic Engineering and Automation, National University of Defense Technology", "National University of Defense Technology, China", "National University of Defence Technology, Changsha 410000, China", "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "28.22902090", "112.99483204", "edu", ""], ["Transgender face recognition with off-the-shelf pre-trained CNNs: A comprehensive study", "Norwegian Biometrics Laboratory, NTNU - Gj\u2298vik, Norway", "Norwegian Biometrics Lab, NTNU, Norway", "Norwegian Biometrics Lab, NTNU, Gj\u2298vik, Norway", "Teknologivegen 22, 2815 Gj\u00f8vik, Norway", "60.78973180", "10.68219270", "edu", ""], ["Jukka Komulainen SOFTWARE - BASED COUNTERMEASURES TO 2 D FACIAL", "University Lecturer Veli-Matti Ulvinen", "University of Oulu", "University of Oulu", "Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi", "65.05921570", "25.46632601", "edu", ""], ["Spoofing faces using makeup: An investigative study", "Michigan State University, United States of America", "Michigan State University", "Michigan State University", "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "42.71856800", "-84.47791571", "edu", ""], ["Face recognition via semi-supervised discriminant local analysis", "Faculty of Information Science and Technology, Multimedia University, Melaka, Malaysia", "Multimedia University", "Multimedia University", "Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia", "2.92749755", "101.64185301", "edu", ""], ["Integrating facial makeup detection into multimodal biometric user verification system", "", "EURECOM", "EURECOM", "Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France", "43.61438600", "7.07112500", "edu", ""], ["Assessment of female facial beauty based on anthropometric, non-permanent and acquisition characteristics", "INRIA, Sophia Antipolis, France", "EURECOM", "EURECOM", "Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France", "43.61438600", "7.07112500", "edu", ""], ["Face Authentication With Makeup Changes", "Lane Department of Computer Science and Electrical Engineering, West Virginia University, Morgantown, WV, USA", "West Virginia University", "West Virginia University", "88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA", "39.65404635", "-79.96475355", "edu", ""], ["Impact of facial cosmetics on automatic gender and age estimation algorithms", "Computer 
Science and Engineering, Michigan State University, East Lansing, USA", "Michigan State University", "Michigan State University", "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "42.71856800", "-84.47791571", "edu", ""], ["Facial makeup detection via selected gradient orientation of entropy information", "Research Center for Information Technology Innovation, Academia Sinica, Taiwan", "Academia Sinica, Taiwan", "Research Center for Institute of Information Science, Academia Sinica, Taiwan", "115, Taiwan, Taipei City, Nangang District, \u7814\u7a76\u9662\u8def\u4e8c\u6bb5128\u865f", "25.04117270", "121.61465180", "edu", ""], ["A new approach for face recognition under makeup changes", "Department of Computer and Information Sciences, University of Delaware, Newark, DE, USA", "University of Delaware", "University of Delaware", "University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA", "39.68103280", "-75.75401840", "edu", ""], ["Facial cosmetics database and impact analysis on automatic face recognition", "", "EURECOM", "EURECOM", "Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France", "43.61438600", "7.07112500", "edu", ""], ["Anti-Makeup: Learning A Bi-Level Adversarial Network for Makeup-Invariant Face Verification", "University of Chinese Academy of Sciences, Beijing 100190, China", "University of Chinese Academy of Sciences", "University of Chinese Academy of Sciences", "University of Chinese Academy of Sciences, UCAS, Yuquanlu, \u7389\u6cc9\u8def, \u7530\u6751, \u6d77\u6dc0\u533a, 100049, \u4e2d\u56fd", "39.90828040", "116.24585270", "edu", ""], ["50 years of biometric research: Accomplishments, challenges, and opportunities", "Michigan State University, East Lansing, MI 48824, USA", "Michigan State University", "Michigan State University", "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "42.71856800", "-84.47791571", "edu", ""], ["Facial makeup detection technique based on texture and shape analysis", "Institut Mines-Telecom, Telecom ParisTech, CNRS LTCI, Sophia Antipolis, France", "T\u00e9l\u00e9com ParisTech", "Institut Mines-Telecom, Telecom ParisTech, CNRS LTCI, Sophia Antipolis, France", "Business P\u00f4le. 1047 route des Dolines. All\u00e9e Pierre Ziller, 06560 Sophia Antipolis, France", "43.62716550", "7.04109170", "edu", ""], ["Application of power laws to biometrics, forensics and network traffic analysis", "University of Surrey", "University of Surrey", "University of Surrey", "University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK", "51.24303255", "-0.59001382", "edu", ""], ["Automatic facial makeup detection with application in face recognition", "Computer Science and Engineering Michigan State University, East Lansing, USA", "Michigan State University", "Michigan State University", "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "42.71856800", "-84.47791571", "edu", ""], ["What Else Does Your Biometric Data Reveal? 
A Survey on Soft Biometrics", "STARS Team, Institut National de Recherche en Informatique et en Automatique, Sophia Antipolis, France", "EURECOM", "EURECOM", "Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France", "43.61438600", "7.07112500", "edu", ""], ["Makeup-insensitive face recognition by facial depth reconstruction and Gabor filter bank from women's real-world images", "Semnan University, Semnan, Iran", "Semnan University", "Semnan University", "\u062f\u0627\u0646\u0634\u06af\u0627\u0647 \u0633\u0645\u0646\u0627\u0646, \u0628\u0632\u0631\u06af\u0631\u0627\u0647 \u0627\u0645\u0627\u0645 \u0631\u0636\u0627, \u0634\u0647\u0631\u06a9 \u0645\u0633\u06a9\u0646 \u0645\u0647\u0631 \u0645\u0635\u0644\u06cc, \u0646\u0627\u0633\u0627\u0631, \u0633\u0645\u0646\u0627\u0646, \u0628\u062e\u0634 \u0645\u0631\u06a9\u0632\u06cc, \u0634\u0647\u0631\u0633\u062a\u0627\u0646 \u0633\u0645\u0646\u0627\u0646, \u0627\u0633\u062a\u0627\u0646 \u0633\u0645\u0646\u0627\u0646, \u200f\u0627\u06cc\u0631\u0627\u0646\u200e", "35.60374440", "53.43445877", "edu", ""], ["Enhanced independent spectral histogram representations in face recognition", "Yonsei University, Seoul, South Korea", "Yonsei University", "Yonsei University", "\uc5f0\uc138\ub300, \uc5f0\uc138\ub85c, \uc2e0\ucd0c\ub3d9, \ucc3d\ucc9c\ub3d9, \uc11c\ub300\ubb38\uad6c, \uc11c\uc6b8\ud2b9\ubcc4\uc2dc, 03789, \ub300\ud55c\ubbfc\uad6d", "37.56004060", "126.93692480", "edu", ""], ["Digital Images Authentication Technique Based on DWT, DCT and Local Binary Patterns", "", "University of Kent", "University of Kent", "University of Kent, St. Stephen's Hill, Hackington, Canterbury, Kent, South East, England, CT2 7AS, UK", "51.29753440", "1.07296165", "edu", ""], ["Evaluating real-life performance of the state-of-the-art in facial expression recognition using a novel YouTube-based datasets", "Department of Computer Science, University of Science & Technology, Bannu, Pakistan", "SungKyunKwan University", "SungKyunKwan University", "\uc131\uade0\uad00\ub300, \ub355\uc601\ub300\ub85c, \ucc9c\ucc9c\ub3d9, \uc7a5\uc548\uad6c, \uc218\uc6d0\uc2dc, \uacbd\uae30, 16357, \ub300\ud55c\ubbfc\uad6d", "37.30031270", "126.97212300", "edu", ""], ["Recognizing human faces under disguise and makeup", "The Hong Kong Polytechnic University", "Hong Kong Polytechnic University", "Hong Kong Polytechnic University", "hong kong, 11, \u80b2\u624d\u9053 Yuk Choi Road, \u5c16\u6c99\u5480 Tsim Sha Tsui, \u6cb9\u5c16\u65fa\u5340 Yau Tsim Mong District, \u4e5d\u9f8d Kowloon, HK, 00000, \u4e2d\u56fd", "22.30457200", "114.17976285", "edu", ""], ["Understanding OSN-based facial disclosure against face authentication systems", "Singapore Management University, Singapore, Singapore", "Singapore Management University", "Singapore Management University", "Singapore Management University, Fort Canning Tunnel, Clarke Quay, City Hall, Singapore, Central, 178895, Singapore", "1.29500195", "103.84909214", "edu", ""]]} \ No newline at end of file
+{"id": "37d6f0eb074d207b53885bd2eb78ccc8a04be597", "paper": {"paperId": "37d6f0eb074d207b53885bd2eb78ccc8a04be597", "key": "vmu", "title": "Can facial cosmetics affect the matching accuracy of face recognition systems?", "journal": "2012 IEEE Fifth International Conference on Biometrics: Theory, Applications and Systems (BTAS)", "address": "", "address_type": "", "lat": "", "lng": "", "pdf_link": "http://www.cse.msu.edu/~climer/DantchevaChenRossFaceCosmetics_BTAS2012.pdf", "report_link": "papers/37d6f0eb074d207b53885bd2eb78ccc8a04be597.html", "citation_count": 49, "citations_geocoded": 24, "citations_unknown": 25, "citations_empty": 0, "citations_pdf": 18, "citations_doi": 22, "name": "VMU"}, "address": null, "citations": [["The Potential of Using Brain Images for Authentication", "College of Mechatronic Engineering and Automation, National University of Defense Technology", "National University of Defense Technology, China", "National University of Defence Technology, Changsha 410000, China", "\u56fd\u9632\u79d1\u5b66\u6280\u672f\u5927\u5b66, \u4e09\u4e00\u5927\u9053, \u5f00\u798f\u533a, \u5f00\u798f\u533a (Kaifu), \u957f\u6c99\u5e02 / Changsha, \u6e56\u5357\u7701, 410073, \u4e2d\u56fd", "28.22902090", "112.99483204", "edu", ""], ["Transgender face recognition with off-the-shelf pre-trained CNNs: A comprehensive study", "Norwegian Biometrics Laboratory, NTNU - Gj\u2298vik, Norway", "Norwegian Biometrics Lab, NTNU, Norway", "Norwegian Biometrics Lab, NTNU, Gj\u2298vik, Norway", "Teknologivegen 22, 2815 Gj\u00f8vik, Norway", "60.78973180", "10.68219270", "edu", ""], ["Jukka Komulainen SOFTWARE - BASED COUNTERMEASURES TO 2 D FACIAL", "University Lecturer Veli-Matti Ulvinen", "University of Oulu", "University of Oulu", "Oulun yliopisto, Biologintie, Linnanmaa, Oulu, Oulun seutukunta, Pohjois-Pohjanmaa, Pohjois-Suomen aluehallintovirasto, Pohjois-Suomi, Manner-Suomi, 90540, Suomi", "65.05921570", "25.46632601", "edu", ""], ["Spoofing faces using makeup: An investigative study", "Michigan State University, United States of America", "Michigan State University", "Michigan State University", "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "42.71856800", "-84.47791571", "edu", ""], ["Face recognition via semi-supervised discriminant local analysis", "Faculty of Information Science and Technology, Multimedia University, Melaka, Malaysia", "Multimedia University", "Multimedia University", "Universiti Multimedia, Persiaran Neuron, Bandar Nusaputra, Cyberjaya, Selangor, 63000, Malaysia", "2.92749755", "101.64185301", "edu", ""], ["Integrating facial makeup detection into multimodal biometric user verification system", "", "EURECOM", "EURECOM", "Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France", "43.61438600", "7.07112500", "edu", ""], ["Assessment of female facial beauty based on anthropometric, non-permanent and acquisition characteristics", "INRIA, Sophia Antipolis, France", "EURECOM", "EURECOM", "Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France", "43.61438600", "7.07112500", "edu", ""], ["Face Authentication With Makeup Changes", "Lane Department of Computer Science and Electrical Engineering, West Virginia University, Morgantown, WV, USA", "West Virginia University", "West Virginia University", "88, Windsor Avenue, The Flatts, Morgantown, Monongalia County, West Virginia, 26505, USA", "39.65404635", "-79.96475355", "edu", ""], ["Impact of facial cosmetics on automatic gender and age estimation algorithms", "Computer Science and 
Engineering, Michigan State University, East Lansing, USA", "Michigan State University", "Michigan State University", "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "42.71856800", "-84.47791571", "edu", ""], ["Facial makeup detection via selected gradient orientation of entropy information", "Research Center for Information Technology Innovation, Academia Sinica, Taiwan", "Academia Sinica, Taiwan", "Research Center for Institute of Information Science, Academia Sinica, Taiwan", "115, Taiwan, Taipei City, Nangang District, \u7814\u7a76\u9662\u8def\u4e8c\u6bb5128\u865f", "25.04117270", "121.61465180", "edu", ""], ["A new approach for face recognition under makeup changes", "Department of Computer and Information Sciences, University of Delaware, Newark, DE, USA", "University of Delaware", "University of Delaware", "University of Delaware, South College Avenue, Newark, New Castle County, Delaware, 19713, USA", "39.68103280", "-75.75401840", "edu", ""], ["Facial cosmetics database and impact analysis on automatic face recognition", "", "EURECOM", "EURECOM", "Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France", "43.61438600", "7.07112500", "edu", ""], ["Anti-Makeup: Learning A Bi-Level Adversarial Network for Makeup-Invariant Face Verification", "University of Chinese Academy of Sciences, Beijing 100190, China", "University of Chinese Academy of Sciences", "University of Chinese Academy of Sciences", "University of Chinese Academy of Sciences, UCAS, Yuquanlu, \u7389\u6cc9\u8def, \u7530\u6751, \u6d77\u6dc0\u533a, 100049, \u4e2d\u56fd", "39.90828040", "116.24585270", "edu", ""], ["50 years of biometric research: Accomplishments, challenges, and opportunities", "Michigan State University, East Lansing, MI 48824, USA", "Michigan State University", "Michigan State University", "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "42.71856800", "-84.47791571", "edu", ""], ["Facial makeup detection technique based on texture and shape analysis", "Institut Mines-Telecom, Telecom ParisTech, CNRS LTCI, Sophia Antipolis, France", "T\u00e9l\u00e9com ParisTech", "Institut Mines-Telecom, Telecom ParisTech, CNRS LTCI, Sophia Antipolis, France", "Business P\u00f4le. 1047 route des Dolines. All\u00e9e Pierre Ziller, 06560 Sophia Antipolis, France", "43.62716550", "7.04109170", "edu", ""], ["Application of power laws to biometrics, forensics and network traffic analysis", "University of Surrey", "University of Surrey", "University of Surrey", "University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK", "51.24303255", "-0.59001382", "edu", ""], ["Automatic facial makeup detection with application in face recognition", "Computer Science and Engineering Michigan State University, East Lansing, USA", "Michigan State University", "Michigan State University", "Michigan State University, Farm Lane, East Lansing, Ingham County, Michigan, 48824, USA", "42.71856800", "-84.47791571", "edu", ""], ["What Else Does Your Biometric Data Reveal? 
A Survey on Soft Biometrics", "STARS Team, Institut National de Recherche en Informatique et en Automatique, Sophia Antipolis, France", "EURECOM", "EURECOM", "Campus SophiaTech, 450 Route des Chappes, 06410 Biot, France", "43.61438600", "7.07112500", "edu", ""], ["Makeup-insensitive face recognition by facial depth reconstruction and Gabor filter bank from women's real-world images", "Semnan University, Semnan, Iran", "Semnan University", "Semnan University", "\u062f\u0627\u0646\u0634\u06af\u0627\u0647 \u0633\u0645\u0646\u0627\u0646, \u0628\u0632\u0631\u06af\u0631\u0627\u0647 \u0627\u0645\u0627\u0645 \u0631\u0636\u0627, \u0634\u0647\u0631\u06a9 \u0645\u0633\u06a9\u0646 \u0645\u0647\u0631 \u0645\u0635\u0644\u06cc, \u0646\u0627\u0633\u0627\u0631, \u0633\u0645\u0646\u0627\u0646, \u0628\u062e\u0634 \u0645\u0631\u06a9\u0632\u06cc, \u0634\u0647\u0631\u0633\u062a\u0627\u0646 \u0633\u0645\u0646\u0627\u0646, \u0627\u0633\u062a\u0627\u0646 \u0633\u0645\u0646\u0627\u0646, \u200f\u0627\u06cc\u0631\u0627\u0646\u200e", "35.60374440", "53.43445877", "edu", ""], ["Enhanced independent spectral histogram representations in face recognition", "Yonsei University, Seoul, South Korea", "Yonsei University", "Yonsei University", "\uc5f0\uc138\ub300, \uc5f0\uc138\ub85c, \uc2e0\ucd0c\ub3d9, \ucc3d\ucc9c\ub3d9, \uc11c\ub300\ubb38\uad6c, \uc11c\uc6b8\ud2b9\ubcc4\uc2dc, 03789, \ub300\ud55c\ubbfc\uad6d", "37.56004060", "126.93692480", "edu", ""], ["Digital Images Authentication Technique Based on DWT, DCT and Local Binary Patterns", "", "University of Kent", "University of Kent", "University of Kent, St. Stephen's Hill, Hackington, Canterbury, Kent, South East, England, CT2 7AS, UK", "51.29753440", "1.07296165", "edu", ""], ["Evaluating real-life performance of the state-of-the-art in facial expression recognition using a novel YouTube-based datasets", "Department of Computer Science, University of Science & Technology, Bannu, Pakistan", "SungKyunKwan University", "SungKyunKwan University", "\uc131\uade0\uad00\ub300, \ub355\uc601\ub300\ub85c, \ucc9c\ucc9c\ub3d9, \uc7a5\uc548\uad6c, \uc218\uc6d0\uc2dc, \uacbd\uae30, 16357, \ub300\ud55c\ubbfc\uad6d", "37.30031270", "126.97212300", "edu", ""], ["Recognizing human faces under disguise and makeup", "The Hong Kong Polytechnic University", "Hong Kong Polytechnic University", "Hong Kong Polytechnic University", "hong kong, 11, \u80b2\u624d\u9053 Yuk Choi Road, \u5c16\u6c99\u5480 Tsim Sha Tsui, \u6cb9\u5c16\u65fa\u5340 Yau Tsim Mong District, \u4e5d\u9f8d Kowloon, HK, 00000, \u4e2d\u56fd", "22.30457200", "114.17976285", "edu", ""], ["Understanding OSN-based facial disclosure against face authentication systems", "Singapore Management University, Singapore, Singapore", "Singapore Management University", "Singapore Management University", "Singapore Management University, Fort Canning Tunnel, Clarke Quay, City Hall, Singapore, Central, 178895, Singapore", "1.29500195", "103.84909214", "edu", ""]]} \ No newline at end of file
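Note (not part of the commit): the citation JSON blobs in this diff all share the same shape, a "paper" object carrying the dataset key, name, and aggregate counts, plus a "citations" list whose rows appear to be [title, raw affiliation, institution, geocoded institution, address, lat, lng, type, note]. The following is a minimal Python sketch for reading one of these files and tallying citations by institution type; the row layout is inferred from the data above and the summarize() helper is illustrative, not part of the scraper.

import json
from collections import Counter

def summarize(path):
    # Load one site/datasets/citations/<paperId>.json record.
    with open(path, encoding="utf-8") as f:
        record = json.load(f)

    paper = record["paper"]
    print(paper["name"], "-", paper["title"])
    print("geocoded:", paper["citations_geocoded"], "of", paper["citation_count"])

    # Index 7 of each citation row holds the institution type ("edu" or "company")
    # in the rows shown above.
    by_type = Counter(row[7] for row in record["citations"])
    for kind, count in by_type.most_common():
        print(" ", kind, count)

summarize("site/datasets/citations/37d6f0eb074d207b53885bd2eb78ccc8a04be597.json")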
diff --git a/site/datasets/citations/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.json b/site/datasets/citations/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.json
index aa15e357..dd08d6b2 100644
--- a/site/datasets/citations/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.json
+++ b/site/datasets/citations/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.json
@@ -1 +1 @@
-{"id": "4b1d23d17476fcf78f4cbadf69fb130b1aa627c0", "paper": {"paperId": "4b1d23d17476fcf78f4cbadf69fb130b1aa627c0", "key": "stickmen_pascal", "title": "Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation", "journal": "", "address": "", "address_type": "", "lat": "", "lng": "", "pdf_link": "http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf", "report_link": "papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html", "citation_count": 278, "citations_geocoded": 119, "citations_unknown": 159, "citations_empty": 12, "citations_pdf": 198, "citations_doi": 2, "name": "Stickmen PASCAL"}, "address": null, "citations": [["PoseTrack: Joint Multi-person Pose Estimation and Tracking", "", "University of Adelaide", "University of Adelaide", "University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia", "-34.91892260", "138.60423668", "edu", ""], ["ChaLearn Looking at People Challenge 2014: Dataset and Results", "", "Microsoft Research Asia", "Microsoft Live Labs Research, China", "Microsoft Research Asia", "35.86166000", "104.19539700", "company", ""], ["Quantized Densely Connected U-Nets for Efficient Landmark Localization", "", "SenseTime", "SenseTime", "China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1\u53f7-7", "39.99300800", "116.32988200", "company", "1 Zhongguancun E Rd, Haidian Qu, China"], ["VNect: real-time 3D human pose estimation with a single RGB camera", "", "Max Planck Institute for Informatics", "Max Planck Institute for Informatics", "MPII, E1 4, Campus, Universit\u00e4t, Sankt Johann, Bezirk Mitte, Saarbr\u00fccken, Regionalverband Saarbr\u00fccken, Saarland, 66123, Deutschland", "49.25795660", "7.04577417", "edu", ""], ["Human Pose Estimation Using Deep Consensus Voting", "", "Weizmann Institute of Science", "Weizmann Institute of Science", "\u05de\u05db\u05d5\u05df \u05d5\u05d9\u05e6\u05de\u05df \u05dc\u05de\u05d3\u05e2, \u05e9\u05d3\u05e8\u05ea \u05de\u05e8\u05db\u05d5\u05e1 \u05d6\u05d9\u05d5, \u05de\u05e2\u05d5\u05e0\u05d5\u05ea \u05e9\u05d9\u05d9\u05df, \u05d0\u05d7\u05d5\u05d6\u05d5\u05ea \u05d4\u05e0\u05e9\u05d9\u05d0, \u05e8\u05d7\u05d5\u05d1\u05d5\u05ea, \u05de\u05d7\u05d5\u05d6 \u05d4\u05de\u05e8\u05db\u05d6, NO, \u05d9\u05e9\u05e8\u05d0\u05dc", "31.90784990", "34.81334092", "edu", ""], ["Surveillance Video Analysis with External Knowledge and Internal Constraints", "", "Carnegie Mellon University", "Carnegie Mellon University", "Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA", "37.41021930", "-122.05965487", "edu", ""], ["Entropy Driven Hierarchical Search for 3D Human Pose Estimation", "", "Swansea University", "Swansea University", "Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK", "51.60915780", "-3.97934429", "edu", ""], ["Single-Shot Multi-Person 3D Body Pose Estimation From Monocular RGB Input", "", "Stanford University", "Stanford University", "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "37.43131385", "-122.16936535", "edu", ""], ["Articulated Pose Estimation Using Hierarchical Exemplar-Based Models", "Columbia University in the City of New York", "Columbia University ", "Columbia University in the City of New York", "Columbia University In The City Of New York, College Walk, Morningside Heights, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA", "40.80717720", "-73.96252798", "edu", 
""], ["Benchmarking and Error Diagnosis in Multi-instance Pose Estimation", "", "California Institute of Technology", "California Institute of Technology", "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "34.13710185", "-118.12527487", "edu", ""], ["Personalizing Human Video Pose Estimation", "", "University of Oxford", "University of Oxford", "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "51.75345380", "-1.25400997", "edu", ""], ["Robust Optimization for Deep Regression", "", "University of Adelaide", "University of Adelaide", "University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia", "-34.91892260", "138.60423668", "edu", ""], ["Stacked Hourglass Networks for Human Pose Estimation", "", "University of Michigan", "University of Michigan", "University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA", "42.29421420", "-83.71003894", "edu", ""], ["Depth Sweep Regression Forests for Estimating 3D Human Pose from Images", "", "University of Bonn", "University of Bonn", "Rheinische Friedrich-Wilhelms-Universit\u00e4t Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk K\u00f6ln, Nordrhein-Westfalen, 53113, Deutschland", "50.73381240", "7.10224650", "edu", ""], ["Synthesizing Training Images for Boosting Human 3D Pose Estimation", "", "Shandong University", "Shandong University", "\u5c71\u4e1c\u5927\u5b66, \u6cf0\u5b89\u8857, \u9ccc\u5c71\u536b\u8857\u9053, \u5373\u58a8\u533a, \u9752\u5c9b\u5e02, \u5c71\u4e1c\u7701, 266200, \u4e2d\u56fd", "36.36934730", "120.67381800", "edu", ""], ["3D Pose Estimation from a Single Monocular Image", "", "University of Bonn", "University of Bonn", "Rheinische Friedrich-Wilhelms-Universit\u00e4t Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk K\u00f6ln, Nordrhein-Westfalen, 53113, Deutschland", "50.73381240", "7.10224650", "edu", ""], ["Implicit models for automatic pose estimation in static images", "", "University of Surrey", "University of Surrey", "University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK", "51.24303255", "-0.59001382", "edu", ""], ["Gamesourcing to acquire labeled human pose estimation data", "", "University of North Carolina at Charlotte", "University of North Carolina at Charlotte", "Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA", "35.31034410", "-80.73261617", "edu", ""], ["Latent SVMs for Human Detection with a Locally Affine Deformation Field", "", "Oxford Brookes University", "Oxford Brookes University", "Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK", "51.75552050", "-1.22615970", "edu", ""], ["Learning to Localize Little Landmarks", "", "University of Illinois, Urbana-Champaign", "University of Illinois, Urbana-Champaign", "B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA", "40.11116745", "-88.22587665", "edu", ""], ["Human Pose Estimation in Videos", "", "University of Central Florida", "University of Central Florida", "University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA", "28.59899755", "-81.19712501", "edu", ""], ["Ordinal Depth Supervision for 3D Human Pose Estimation", "", "Zhejiang University", "Zhejiang University", 
"\u6d59\u6c5f\u5927\u5b66\u4e4b\u6c5f\u6821\u533a, \u4e4b\u6c5f\u8def, \u8f6c\u5858\u8857\u9053, \u897f\u6e56\u533a (Xihu), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, 310008, \u4e2d\u56fd", "30.19331415", "120.11930822", "edu", ""], ["Improving Multi-Person Pose Estimation using Label Correction", "", "Keio University", "Keio University", "\u7db1\u5cf6\u5e02\u6c11\u306e\u68ee, \u3051\u3064\u308f\u308a\u5742, \u6e2f\u5317\u533a, \u6a2a\u6d5c\u5e02, \u795e\u5948\u5ddd\u770c, \u95a2\u6771\u5730\u65b9, 223-0053, \u65e5\u672c", "35.54169690", "139.63471840", "edu", ""], ["R-CNNs for Pose Estimation and Action Detection", "", "University of California, Berkeley", "University of California, Berkeley", "Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA", "37.86871260", "-122.25586815", "edu", ""], ["CU-Net: Coupled U-Nets", "", "Binghamton University", "Binghamton University", "Binghamton University Downtown Center, Washington Street, Downtown, Binghamton, Broome County, New York, 13901, USA", "42.09580770", "-75.91455689", "edu", ""], ["Pose Machines: Articulated Pose Estimation via Inference Machines", "", "Carnegie Mellon University", "Carnegie Mellon University", "Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA", "37.41021930", "-122.05965487", "edu", ""], ["3D Human Pose Estimation = 2D Pose Estimation + Matching", "", "Carnegie Mellon University", "Carnegie Mellon University", "Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA", "37.41021930", "-122.05965487", "edu", ""], ["Leveraging Inexpensive Supervision Signals for Visual Learning", "The Robotics Institute", "Carnegie Mellon University", "Carnegie Mellon University", "Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA", "37.41021930", "-122.05965487", "edu", ""], ["Fashion Landmark Detection in the Wild", "The Chinese University of Hong Kong", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["DeeperCut: A Deeper, Stronger, and Faster Multi-person Pose Estimation Model", "", "Stanford University", "Stanford University", "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "37.43131385", "-122.16936535", "edu", ""], ["Keep it SMPL: Automatic Estimation of 3D Human Pose and Shape from a Single Image", "", "Microsoft Research Asia", "Microsoft Live Labs Research, China", "Microsoft Research Asia", "35.86166000", "104.19539700", "company", ""], ["Learning Visual Symbols for Parsing Human Poses in Images", "", "Nanjing University", "Nanjing University", "NJU, \u4e09\u6c5f\u8def, \u9f13\u697c\u533a, \u5357\u4eac\u5e02, \u6c5f\u82cf\u7701, 210093, \u4e2d\u56fd", "32.05659570", "118.77408833", "edu", ""], ["DGPose: Disentangled Semi-supervised Deep Generative Models for Human Body Analysis", "", "University of Oxford", "University of Oxford", "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "51.75345380", "-1.25400997", "edu", ""], ["Fast Online Upper Body Pose Estimation from Video", "", "Beijing, China", 
"Beijing, China", "\u5317\u4eac\u5e02, \u4e1c\u57ce\u533a, \u5317\u4eac\u5e02, 100010, \u4e2d\u56fd", "39.90621700", "116.39127570", "edu", ""], ["Beyond Tree Structure Models: A New Occlusion Aware Graphical Model for Human Pose Estimation", "", "Chinese Academy of Sciences", "Chinese Academy of Sciences", "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "40.00447950", "116.37023800", "edu", ""], ["Convolutional Pose Machines: A Deep Architecture for Estimating Articulated Poses", "", "Carnegie Mellon University", "Carnegie Mellon University", "Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA", "37.41021930", "-122.05965487", "edu", ""], ["Athlete Pose Estimation by a Global-Local Network", "", "Seoul National University", "Seoul National University", "\uc11c\uc6b8\ub300\ud559\uad50, \uc11c\ud638\ub3d9\ub85c, \uc11c\ub454\ub3d9, \uad8c\uc120\uad6c, \uc218\uc6d0\uc2dc, \uacbd\uae30, 16614, \ub300\ud55c\ubbfc\uad6d", "37.26728000", "126.98411510", "edu", ""], ["DeepCut: Joint Subset Partition and Labeling for Multi Person Pose Estimation", "", "Stanford University", "Stanford University", "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "37.43131385", "-122.16936535", "edu", ""], ["Jointly Optimize Data Augmentation and Network Training: Adversarial Data Augmentation in Human Pose Estimation", "", "Rutgers University", "Rutgers University", "Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA", "40.47913175", "-74.43168868", "edu", ""], ["Exploring the Spatial Hierarchy of Mixture Models for Human Pose Estimation", "", "Microsoft Research Asia", "Microsoft Live Labs Research, China", "Microsoft Research Asia", "35.86166000", "104.19539700", "company", ""], ["Detangling People: Individuating Multiple Close People and Their Body Parts via Region Assembly", "", "University of Texas at Austin", "University of Texas at Austin", "University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA", "30.28415100", "-97.73195598", "edu", ""], ["Parsing occluded people by flexible compositions", "University of California, Los Angeles", "University of California, Los Angeles", "University of California, Los Angeles", "200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA", "34.06877880", "-118.44500940", "edu", ""], ["Multi-context Attention for Human Pose Estimation", "", "Tsinghua University", "Tsinghua University", "\u6e05\u534e\u5927\u5b66, 30, \u53cc\u6e05\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "40.00229045", "116.32098908", "edu", ""], ["Theory and Practice of Globally Optimal Deformation Estimation", "", "Microsoft Research Asia", "Microsoft Live Labs Research, China", "Microsoft Research Asia", "35.86166000", "104.19539700", "company", ""], ["Learning a sequential search for landmarks", "", "University of Illinois, Urbana-Champaign", "University of Illinois, Urbana-Champaign", "B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA", "40.11116745", "-88.22587665", "edu", ""], ["Human Pose Estimation Using Global and Local Normalization", "", "Chinese Academy of Sciences", "Chinese Academy of Sciences", "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / 
Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "40.00447950", "116.37023800", "edu", ""], ["Realtime Multi-person 2D Pose Estimation Using Part Affinity Fields", "", "Carnegie Mellon University", "Carnegie Mellon University", "Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA", "37.41021930", "-122.05965487", "edu", ""], ["Real-time Human Pose Estimation from Video with Convolutional Neural Networks", "", "Aalto University", "Aalto University", "Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etel\u00e4-Suomi, Manner-Suomi, 02150, Suomi", "60.18558755", "24.82427330", "edu", ""], ["MultiPoseNet: Fast Multi-Person Pose Estimation Using Pose Residual Network", "", "Middle East Technical University", "Middle East Technical University", "ODT\u00dc, 1, 1591.sk(315.sk), \u00c7i\u011fdem Mahallesi, Ankara, \u00c7ankaya, Ankara, \u0130\u00e7 Anadolu B\u00f6lgesi, 06800, T\u00fcrkiye", "39.87549675", "32.78553506", "edu", ""], ["Articulated Pose Estimation Using Discriminative Armlet Classifiers", "", "Facebook", "Facebook", "250 Bryant St, Mountain View, CA 94041, USA", "37.39367170", "-122.08072620", "company", "Facebook, Mountain View, CA"], ["Human Context: Modeling Human-Human Interactions for Monocular 3D Pose Estimation", "", "Max Planck Institute for Informatics", "Max Planck Institute for Informatics", "MPII, E1 4, Campus, Universit\u00e4t, Sankt Johann, Bezirk Mitte, Saarbr\u00fccken, Regionalverband Saarbr\u00fccken, Saarland, 66123, Deutschland", "49.25795660", "7.04577417", "edu", ""], ["Human Pose Estimation : Extension and Application", "International Institute of Information Technology", "International Institute of Information Technology", "International Institute of Information Technology", "International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India", "17.44549570", "78.34854698", "edu", ""], ["EXTRACTING THE X FACTOR IN HUMAN PARSING 3 Factored module Factored task Aggregation module Input Main task Shared features Silhouette Body parts The X Factor bottleneck layers bottleneck layers bottleneck layers Initial module bottleneck layers initial block", "", "University of Cambridge", "University of Cambridge", "Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK", "52.17638955", "0.14308882", "edu", ""], ["User qualified ? N Feedback Pass ? Pass ? 
Annotator training Annotating images", "", "University of Edinburgh", "University of Edinburgh", "New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK", "55.94951105", "-3.19534913", "edu", ""], ["Deep Multitask Architecture for Integrated 2D and 3D Human Sensing", "", "Lund University", "Lund University", "TEM at Lund University, 9, Klostergatan, Stadsk\u00e4rnan, Centrum, Lund, Sk\u00e5ne, G\u00f6taland, 22222, Sverige", "55.70395710", "13.19020110", "edu", ""], ["ConvNets with Smooth Adaptive Activation Functions for Regression", "Stony Brook University Hospital", "Stony Brook University Hospital", "Stony Brook University Hospital", "Stony Brook University Hospital, 101, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA", "40.90826665", "-73.11520891", "edu", ""], ["Self Adversarial Training for Human Pose Estimation", "", "National Tsing Hua University", "National Tsing Hua University", "\u570b\u7acb\u6e05\u83ef\u5927\u5b78, 101, \u514b\u606d\u6a4b, \u5149\u660e\u91cc, \u8d64\u571f\u5d0e, \u6771\u5340, \u65b0\u7af9\u5e02, 30013, \u81fa\u7063", "24.79254840", "120.99511830", "edu", ""], ["Joint Attention in Driver-Pedestrian Interaction: from Theory to Practice", "York University, Toronto, ON, Canada", "York University", "York University", "York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada", "43.77439110", "-79.50481085", "edu", ""], ["End-to-End Learning of Deformable Mixture of Parts and Deep Convolutional Neural Networks for Human Pose Estimation", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["CRF-CNN: Modeling Structured Information in Human Pose Estimation", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["A Complete View Depended Volleyball Video Dataset under the Uncontrolled Conditions", "", "Islamic Azad University", "Islamic Azad University", "\u062f\u0627\u0646\u0634\u06af\u0627\u0647 \u0622\u0632\u0627\u062f \u0627\u0633\u0644\u0627\u0645\u06cc, \u0647\u0645\u062f\u0627\u0646, \u0628\u062e\u0634 \u0645\u0631\u06a9\u0632\u06cc \u0634\u0647\u0631\u0633\u062a\u0627\u0646 \u0647\u0645\u062f\u0627\u0646, \u0634\u0647\u0631\u0633\u062a\u0627\u0646 \u0647\u0645\u062f\u0627\u0646, \u0627\u0633\u062a\u0627\u0646 \u0647\u0645\u062f\u0627\u0646, \u200f\u0627\u06cc\u0631\u0627\u0646\u200e", "34.84529990", "48.55962120", "edu", ""], ["Real-time Factored ConvNets: Extracting the X Factor in Human Parsing", "", "University of Cambridge", "University of Cambridge", "Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK", "52.17638955", "0.14308882", "edu", ""], ["Modeling Instance Appearance for Recognition \u2013 Can We Do Better Than EM?", "", "Stanford University", "Stanford University", "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "37.43131385", "-122.16936535", "edu", ""], ["Part-based pose 
estimation with local and non-local contextual information", "", "Nanjing University", "Nanjing University", "NJU, \u4e09\u6c5f\u8def, \u9f13\u697c\u533a, \u5357\u4eac\u5e02, \u6c5f\u82cf\u7701, 210093, \u4e2d\u56fd", "32.05659570", "118.77408833", "edu", ""], ["Inner Space Preserving Generative Pose Machine", "", "Northeastern University", "Northeastern University", "Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA", "42.33836680", "-71.08793524", "edu", ""], ["End-to-end Recovery of Human Shape and Pose", "", "University of California, Berkeley", "University of California, Berkeley", "Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA", "37.86871260", "-122.25586815", "edu", ""], ["Convolutional Pose Machines", "", "Carnegie Mellon University", "Carnegie Mellon University", "Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA", "37.41021930", "-122.05965487", "edu", ""], ["Joint Training of a Convolutional Network and a Graphical Model for Human Pose Estimation", "", "New York University", "New York University", "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "40.72925325", "-73.99625394", "edu", ""], ["Multi-person Pose Estimation with Local Joint-to-Person Associations", "", "University of Bonn", "University of Bonn", "Rheinische Friedrich-Wilhelms-Universit\u00e4t Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk K\u00f6ln, Nordrhein-Westfalen, 53113, Deutschland", "50.73381240", "7.10224650", "edu", ""], ["Fine-grained classification of pedestrians in video: Benchmark and state of the art", "California Institute of Technology", "California Institute of Technology", "California Institute of Technology", "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "34.13710185", "-118.12527487", "edu", ""], ["DeePM: A Deep Part-Based Model for Object Detection and Semantic Part Localization", "", "University of California, Los Angeles", "University of California, Los Angeles", "200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA", "34.06877880", "-118.44500940", "edu", ""], ["Fast Human Pose Estimation", "", "University of Electronic Science and Technology of China", "University of Electronic Science and Technology of China", "Columbus, OH 43210, USA", "40.01419050", "-83.03091430", "edu", ""], ["MoDeep: A Deep Learning Framework Using Motion Features for Human Pose Estimation", "", "New York University", "New York University", "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "40.72925325", "-73.99625394", "edu", ""], ["A Semi-Supervised Data Augmentation Approach using 3D Graphical Engines", "", "Northeastern University", "Northeastern University", "Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA", "42.33836680", "-71.08793524", "edu", ""], ["Deeply Learned Compositional Models for Human Pose Estimation", "", "Northwestern University", "Northwestern University", "Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA", "42.05511640", "-87.67581113", "edu", ""], ["Efficient object localization 
using Convolutional Networks", "", "New York University", "New York University", "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "40.72925325", "-73.99625394", "edu", ""], ["Part-Pair Representation for Part Localization", "Columbia University", "Columbia University", "Columbia University", "Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA", "40.84198360", "-73.94368971", "edu", ""], ["Daily Living Activities Recognition via Efficient High and Low Level Cues Combination and Fisher Kernel Representation", "", "University Politehnica of Bucharest", "University Politehnica of Bucharest", "Universitatea Politehnica din Bucure\u0219ti, Novum Invest, Bucure\u0219ti, Militari, Sector 6, Municipiul Bucure\u0219ti, 060042, Rom\u00e2nia", "44.43918115", "26.05044565", "edu", ""], ["Deep Fully-Connected Part-Based Models for Human Pose Estimation", "", "University of Oxford", "University of Oxford", "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "51.75345380", "-1.25400997", "edu", ""], ["Appearance Sharing for Collective Human Pose Estimation", "", "University of Edinburgh", "University of Edinburgh", "New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK", "55.94951105", "-3.19534913", "edu", ""], ["2D-3D Pose Consistency-based Conditional Random Fields for 3D Human Pose Estimation", "", "Seoul National University", "Seoul National University", "\uc11c\uc6b8\ub300\ud559\uad50, \uc11c\ud638\ub3d9\ub85c, \uc11c\ub454\ub3d9, \uad8c\uc120\uad6c, \uc218\uc6d0\uc2dc, \uacbd\uae30, 16614, \ub300\ud55c\ubbfc\uad6d", "37.26728000", "126.98411510", "edu", ""], ["Localization of Humans in Images Using Convolutional Networks", "", "New York University", "New York University", "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "40.72925325", "-73.99625394", "edu", ""], ["Metadata of the chapter that will be visualized in SpringerLink", "", "Stanford University", "Stanford University", "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "37.43131385", "-122.16936535", "edu", ""], ["Combining local appearance and holistic view: Dual-Source Deep Neural Networks for human pose estimation", "", "University of South Carolina", "University of South Carolina", "University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA", "33.99282980", "-81.02685168", "edu", ""], ["Stacked Deformable Part Model with Shape Regression for Object Part Localization", "of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, China", "Chinese Academy of Sciences", "Chinese Academy of Sciences", "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "40.00447950", "116.37023800", "edu", ""], ["Compact Real-time avoidance on a Humanoid Robot for Human-robot Interaction", "", "Yale University", "Yale University", "Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA", "41.25713055", "-72.98966960", "edu", ""], ["It's all Relative: Monocular 3D Human Pose Estimation from Weakly Supervised Data", "", "California 
Institute of Technology", "California Institute of Technology", "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "34.13710185", "-118.12527487", "edu", ""], ["Pose-Driven Deep Convolutional Model for Person Re-identification", "", "University of Texas at San Antonio", "University of Texas at San Antonio", "UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA", "29.58333105", "-98.61944505", "edu", ""], ["Multi-task Recurrent Neural Network for Immediacy Prediction", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["Self-supervised Learning of Geometrically Stable Features Through Probabilistic Introspection", "", "University of Oxford", "University of Oxford", "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "51.75345380", "-1.25400997", "edu", ""], ["Structured Feature Learning for Pose Estimation", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["Beyond Physical Connections: Tree Models in Human Pose Estimation", "", "Nanjing University", "Nanjing University", "NJU, \u4e09\u6c5f\u8def, \u9f13\u697c\u533a, \u5357\u4eac\u5e02, \u6c5f\u82cf\u7701, 210093, \u4e2d\u56fd", "32.05659570", "118.77408833", "edu", ""], ["Human Pose Estimation from Monocular Images: A Comprehensive Survey", "", "Shandong University", "Shandong University", "\u5c71\u4e1c\u5927\u5b66, \u6cf0\u5b89\u8857, \u9ccc\u5c71\u536b\u8857\u9053, \u5373\u58a8\u533a, \u9752\u5c9b\u5e02, \u5c71\u4e1c\u7701, 266200, \u4e2d\u56fd", "36.36934730", "120.67381800", "edu", ""], ["Training object class detectors with click supervision Dim", "", "University of Edinburgh", "University of Edinburgh", "New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK", "55.94951105", "-3.19534913", "edu", ""], ["PoseTrack: A Benchmark for Human Pose Estimation and Tracking", "", "University of Bonn", "University of Bonn", "Rheinische Friedrich-Wilhelms-Universit\u00e4t Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk K\u00f6ln, Nordrhein-Westfalen, 53113, Deutschland", "50.73381240", "7.10224650", "edu", ""], ["DenseReg: Fully Convolutional Dense Shape Regression In-the-Wild", "University College London, UK", "University College London", "University College London", "UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK", "51.52316070", "-0.12820370", "edu", ""], ["Shape Models of the Human Body for Distributed Inference", "", "Brown University", "Brown University", "Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA", "41.82686820", "-71.40123146", "edu", ""], ["Lifting from the Deep: Convolutional 3D Pose Estimation from a Single Image", "", "University College London", "University College London", "UCL Institute of Education, 20, Bedford Way, 
Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK", "51.52316070", "-0.12820370", "edu", ""], ["Learning in an Uncertain World: Representing Ambiguity Through Multiple Hypotheses", "", "Technical University Munich", "Technical University Munich", "TUM, 21, Arcisstra\u00dfe, Bezirksteil K\u00f6nigsplatz, Stadtbezirk 03 Maxvorstadt, M\u00fcnchen, Obb, Bayern, 80333, Deutschland", "48.14955455", "11.56775314", "edu", ""], ["From Pictorial Structures to deformable structures", "", "Brown University", "Brown University", "Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA", "41.82686820", "-71.40123146", "edu", ""], ["Human Pose Estimation Using Body Parts Dependent Joint Regressors", "", "ETH Z\u00fcrich", "ETH Z\u00fcrich", "ETH Z\u00fcrich, 101, R\u00e4mistrasse, Hochschulen, Altstadt, Z\u00fcrich, Bezirk Z\u00fcrich, Z\u00fcrich, 8092, Schweiz/Suisse/Svizzera/Svizra", "47.37645340", "8.54770931", "edu", ""], ["Multi-view Pictorial Structures for 3D Human Pose Estimation", "", "Max Planck Institute for Informatics", "Max Planck Institute for Informatics", "MPII, E1 4, Campus, Universit\u00e4t, Sankt Johann, Bezirk Mitte, Saarbr\u00fccken, Regionalverband Saarbr\u00fccken, Saarland, 66123, Deutschland", "49.25795660", "7.04577417", "edu", ""], ["Towards Understanding Action Recognition", "", "Brown University", "Brown University", "Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA", "41.82686820", "-71.40123146", "edu", ""], ["A Rotation Invariant Latent Factor Model for Moveme Discovery from Static Poses", "California Institute of Technology, Pasadena, CA, USA", "California Institute of Technology", "California Institute of Technology", "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "34.13710185", "-118.12527487", "edu", ""], ["Human Pose Estimation via Convolutional Part Heatmap Regression", "", "University of Nottingham", "University of Nottingham", "University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK", "52.93874280", "-1.20029569", "edu", ""], ["Learning Feature Pyramids for Human Pose Estimation", "", "University of Sydney", "University of Sydney", "USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia", "-33.88890695", "151.18943366", "edu", ""], ["Input Reconstruction Side and top down view Part Segmentation Input Reconstruction Side and top down view Part Segmentation", "", "University of California, Berkeley", "University of California, Berkeley", "Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA", "37.86871260", "-122.25586815", "edu", ""], ["Learning Kinematic Descriptions using SPARE: Simulated and Physical ARticulated Extendable dataset", "", "University of Michigan", "University of Michigan", "University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA", "42.29421420", "-83.71003894", "edu", ""], ["Adversarial PoseNet: A Structure-Aware Convolutional Network for Human Pose Estimation", "", "Nanjing University", "Nanjing University", "NJU, \u4e09\u6c5f\u8def, \u9f13\u697c\u533a, \u5357\u4eac\u5e02, \u6c5f\u82cf\u7701, 210093, \u4e2d\u56fd", "32.05659570", "118.77408833", "edu", ""], ["GANerated Hands for Real-time 3D Hand Tracking from Monocular RGB", "", "Stanford University", "Stanford University", "Stanford 
University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "37.43131385", "-122.16936535", "edu", ""], ["Training Object Class Detectors with Click Supervision", "", "University of Edinburgh", "University of Edinburgh", "New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK", "55.94951105", "-3.19534913", "edu", ""], ["Pose-Guided Human Parsing by an AND/OR Graph Using Pose-Context Features", "", "University of California, Los Angeles", "University of California, Los Angeles", "200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA", "34.06877880", "-118.44500940", "edu", ""], ["MONET: Multiview Semi-supervised Keypoint via Epipolar Divergence", "", "University of Minnesota", "University of Minnesota", "WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA", "44.97308605", "-93.23708813", "edu", ""], ["Thin-Slicing Network: A Deep Structured Model for Pose Estimation in Videos", "", "ETH Zurich", "Computer Vision Laboratory, ETH Zurich, Zurich, Switzerland", "Sternwartstrasse 7, 8092 Z\u00fcrich, Switzerland", "47.37723980", "8.55216180", "edu", ""], ["Dual Path Networks for Multi-Person Human Pose Estimation", "", "University of Missouri", "University of Missouri", "L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA", "38.92676100", "-92.29193783", "edu", ""], ["A Coarse-Fine Network for Keypoint Localization", "", "University of Technology Sydney", "University of Technology Sydney", "University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia", "-33.88096510", "151.20107299", "edu", ""], ["Pose-Guided Human Parsing with Deep Learned Features", "", "University of California, Los Angeles", "University of California, Los Angeles", "200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA", "34.06877880", "-118.44500940", "edu", ""], ["Neural Networks with Smooth Adaptive Activation Functions for Regression", "Stony Brook University, NY, USA", "Stony Brook University", "Stony Brook University", "Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA", "40.91531960", "-73.12706260", "edu", ""], ["Marker-Less 3D Human Motion Capture with Monocular Image Sequence and Height-Maps", "", "National University of Singapore", "National University of Singapore", "NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore", "1.29620180", "103.77689944", "edu", ""]]} \ No newline at end of file
+{"id": "4b1d23d17476fcf78f4cbadf69fb130b1aa627c0", "paper": {"paperId": "4b1d23d17476fcf78f4cbadf69fb130b1aa627c0", "key": "stickmen_buffy", "title": "Clustered Pose and Nonlinear Appearance Models for Human Pose Estimation", "journal": "", "address": "", "address_type": "", "lat": "", "lng": "", "pdf_link": "http://pdfs.semanticscholar.org/4b1d/23d17476fcf78f4cbadf69fb130b1aa627c0.pdf", "report_link": "papers/4b1d23d17476fcf78f4cbadf69fb130b1aa627c0.html", "citation_count": 278, "citations_geocoded": 119, "citations_unknown": 159, "citations_empty": 12, "citations_pdf": 198, "citations_doi": 2, "name": "Buffy Stickmen"}, "address": null, "citations": [["PoseTrack: Joint Multi-person Pose Estimation and Tracking", "", "University of Adelaide", "University of Adelaide", "University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia", "-34.91892260", "138.60423668", "edu", ""], ["ChaLearn Looking at People Challenge 2014: Dataset and Results", "", "Microsoft Research Asia", "Microsoft Live Labs Research, China", "Microsoft Research Asia", "35.86166000", "104.19539700", "company", ""], ["Quantized Densely Connected U-Nets for Efficient Landmark Localization", "", "SenseTime", "SenseTime", "China, Beijing Shi, Haidian Qu, WuDaoKou, Zhongguancun E Rd, 1\u53f7-7", "39.99300800", "116.32988200", "company", "1 Zhongguancun E Rd, Haidian Qu, China"], ["VNect: real-time 3D human pose estimation with a single RGB camera", "", "Max Planck Institute for Informatics", "Max Planck Institute for Informatics", "MPII, E1 4, Campus, Universit\u00e4t, Sankt Johann, Bezirk Mitte, Saarbr\u00fccken, Regionalverband Saarbr\u00fccken, Saarland, 66123, Deutschland", "49.25795660", "7.04577417", "edu", ""], ["Human Pose Estimation Using Deep Consensus Voting", "", "Weizmann Institute of Science", "Weizmann Institute of Science", "\u05de\u05db\u05d5\u05df \u05d5\u05d9\u05e6\u05de\u05df \u05dc\u05de\u05d3\u05e2, \u05e9\u05d3\u05e8\u05ea \u05de\u05e8\u05db\u05d5\u05e1 \u05d6\u05d9\u05d5, \u05de\u05e2\u05d5\u05e0\u05d5\u05ea \u05e9\u05d9\u05d9\u05df, \u05d0\u05d7\u05d5\u05d6\u05d5\u05ea \u05d4\u05e0\u05e9\u05d9\u05d0, \u05e8\u05d7\u05d5\u05d1\u05d5\u05ea, \u05de\u05d7\u05d5\u05d6 \u05d4\u05de\u05e8\u05db\u05d6, NO, \u05d9\u05e9\u05e8\u05d0\u05dc", "31.90784990", "34.81334092", "edu", ""], ["Surveillance Video Analysis with External Knowledge and Internal Constraints", "", "Carnegie Mellon University", "Carnegie Mellon University", "Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA", "37.41021930", "-122.05965487", "edu", ""], ["Entropy Driven Hierarchical Search for 3D Human Pose Estimation", "", "Swansea University", "Swansea University", "Swansea University, University Footbridge, Sketty, Swansea, Wales, SA2 8PZ, UK", "51.60915780", "-3.97934429", "edu", ""], ["Single-Shot Multi-Person 3D Body Pose Estimation From Monocular RGB Input", "", "Stanford University", "Stanford University", "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "37.43131385", "-122.16936535", "edu", ""], ["Articulated Pose Estimation Using Hierarchical Exemplar-Based Models", "Columbia University in the City of New York", "Columbia University ", "Columbia University in the City of New York", "Columbia University In The City Of New York, College Walk, Morningside Heights, Manhattan, Manhattan Community Board 9, New York County, NYC, New York, 10027, USA", "40.80717720", "-73.96252798", "edu", 
""], ["Benchmarking and Error Diagnosis in Multi-instance Pose Estimation", "", "California Institute of Technology", "California Institute of Technology", "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "34.13710185", "-118.12527487", "edu", ""], ["Personalizing Human Video Pose Estimation", "", "University of Oxford", "University of Oxford", "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "51.75345380", "-1.25400997", "edu", ""], ["Robust Optimization for Deep Regression", "", "University of Adelaide", "University of Adelaide", "University of Adelaide, North Terrace, Adelaide, 5000, City of Adelaide, South Australia, 5000, Australia", "-34.91892260", "138.60423668", "edu", ""], ["Stacked Hourglass Networks for Human Pose Estimation", "", "University of Michigan", "University of Michigan", "University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA", "42.29421420", "-83.71003894", "edu", ""], ["Depth Sweep Regression Forests for Estimating 3D Human Pose from Images", "", "University of Bonn", "University of Bonn", "Rheinische Friedrich-Wilhelms-Universit\u00e4t Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk K\u00f6ln, Nordrhein-Westfalen, 53113, Deutschland", "50.73381240", "7.10224650", "edu", ""], ["Synthesizing Training Images for Boosting Human 3D Pose Estimation", "", "Shandong University", "Shandong University", "\u5c71\u4e1c\u5927\u5b66, \u6cf0\u5b89\u8857, \u9ccc\u5c71\u536b\u8857\u9053, \u5373\u58a8\u533a, \u9752\u5c9b\u5e02, \u5c71\u4e1c\u7701, 266200, \u4e2d\u56fd", "36.36934730", "120.67381800", "edu", ""], ["3D Pose Estimation from a Single Monocular Image", "", "University of Bonn", "University of Bonn", "Rheinische Friedrich-Wilhelms-Universit\u00e4t Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk K\u00f6ln, Nordrhein-Westfalen, 53113, Deutschland", "50.73381240", "7.10224650", "edu", ""], ["Implicit models for automatic pose estimation in static images", "", "University of Surrey", "University of Surrey", "University of Surrey, Spine Road, Guildford Park, Guildford, Surrey, South East, England, GU2 7XH, UK", "51.24303255", "-0.59001382", "edu", ""], ["Gamesourcing to acquire labeled human pose estimation data", "", "University of North Carolina at Charlotte", "University of North Carolina at Charlotte", "Lot 20, Poplar Terrace Drive, Charlotte, Mecklenburg County, North Carolina, 28223, USA", "35.31034410", "-80.73261617", "edu", ""], ["Latent SVMs for Human Detection with a Locally Affine Deformation Field", "", "Oxford Brookes University", "Oxford Brookes University", "Oxford Brookes University, Headington Road, Headington, Oxford, Oxon, South East, England, OX3 0BL, UK", "51.75552050", "-1.22615970", "edu", ""], ["Learning to Localize Little Landmarks", "", "University of Illinois, Urbana-Champaign", "University of Illinois, Urbana-Champaign", "B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA", "40.11116745", "-88.22587665", "edu", ""], ["Human Pose Estimation in Videos", "", "University of Central Florida", "University of Central Florida", "University of Central Florida, Libra Drive, University Park, Orange County, Florida, 32816, USA", "28.59899755", "-81.19712501", "edu", ""], ["Ordinal Depth Supervision for 3D Human Pose Estimation", "", "Zhejiang University", "Zhejiang University", 
"\u6d59\u6c5f\u5927\u5b66\u4e4b\u6c5f\u6821\u533a, \u4e4b\u6c5f\u8def, \u8f6c\u5858\u8857\u9053, \u897f\u6e56\u533a (Xihu), \u676d\u5dde\u5e02 Hangzhou, \u6d59\u6c5f\u7701, 310008, \u4e2d\u56fd", "30.19331415", "120.11930822", "edu", ""], ["Improving Multi-Person Pose Estimation using Label Correction", "", "Keio University", "Keio University", "\u7db1\u5cf6\u5e02\u6c11\u306e\u68ee, \u3051\u3064\u308f\u308a\u5742, \u6e2f\u5317\u533a, \u6a2a\u6d5c\u5e02, \u795e\u5948\u5ddd\u770c, \u95a2\u6771\u5730\u65b9, 223-0053, \u65e5\u672c", "35.54169690", "139.63471840", "edu", ""], ["R-CNNs for Pose Estimation and Action Detection", "", "University of California, Berkeley", "University of California, Berkeley", "Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA", "37.86871260", "-122.25586815", "edu", ""], ["CU-Net: Coupled U-Nets", "", "Binghamton University", "Binghamton University", "Binghamton University Downtown Center, Washington Street, Downtown, Binghamton, Broome County, New York, 13901, USA", "42.09580770", "-75.91455689", "edu", ""], ["Pose Machines: Articulated Pose Estimation via Inference Machines", "", "Carnegie Mellon University", "Carnegie Mellon University", "Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA", "37.41021930", "-122.05965487", "edu", ""], ["3D Human Pose Estimation = 2D Pose Estimation + Matching", "", "Carnegie Mellon University", "Carnegie Mellon University", "Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA", "37.41021930", "-122.05965487", "edu", ""], ["Leveraging Inexpensive Supervision Signals for Visual Learning", "The Robotics Institute", "Carnegie Mellon University", "Carnegie Mellon University", "Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA", "37.41021930", "-122.05965487", "edu", ""], ["Fashion Landmark Detection in the Wild", "The Chinese University of Hong Kong", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["DeeperCut: A Deeper, Stronger, and Faster Multi-person Pose Estimation Model", "", "Stanford University", "Stanford University", "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "37.43131385", "-122.16936535", "edu", ""], ["Keep it SMPL: Automatic Estimation of 3D Human Pose and Shape from a Single Image", "", "Microsoft Research Asia", "Microsoft Live Labs Research, China", "Microsoft Research Asia", "35.86166000", "104.19539700", "company", ""], ["Learning Visual Symbols for Parsing Human Poses in Images", "", "Nanjing University", "Nanjing University", "NJU, \u4e09\u6c5f\u8def, \u9f13\u697c\u533a, \u5357\u4eac\u5e02, \u6c5f\u82cf\u7701, 210093, \u4e2d\u56fd", "32.05659570", "118.77408833", "edu", ""], ["DGPose: Disentangled Semi-supervised Deep Generative Models for Human Body Analysis", "", "University of Oxford", "University of Oxford", "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "51.75345380", "-1.25400997", "edu", ""], ["Fast Online Upper Body Pose Estimation from Video", "", "Beijing, China", 
"Beijing, China", "\u5317\u4eac\u5e02, \u4e1c\u57ce\u533a, \u5317\u4eac\u5e02, 100010, \u4e2d\u56fd", "39.90621700", "116.39127570", "edu", ""], ["Beyond Tree Structure Models: A New Occlusion Aware Graphical Model for Human Pose Estimation", "", "Chinese Academy of Sciences", "Chinese Academy of Sciences", "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "40.00447950", "116.37023800", "edu", ""], ["Convolutional Pose Machines: A Deep Architecture for Estimating Articulated Poses", "", "Carnegie Mellon University", "Carnegie Mellon University", "Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA", "37.41021930", "-122.05965487", "edu", ""], ["Athlete Pose Estimation by a Global-Local Network", "", "Seoul National University", "Seoul National University", "\uc11c\uc6b8\ub300\ud559\uad50, \uc11c\ud638\ub3d9\ub85c, \uc11c\ub454\ub3d9, \uad8c\uc120\uad6c, \uc218\uc6d0\uc2dc, \uacbd\uae30, 16614, \ub300\ud55c\ubbfc\uad6d", "37.26728000", "126.98411510", "edu", ""], ["DeepCut: Joint Subset Partition and Labeling for Multi Person Pose Estimation", "", "Stanford University", "Stanford University", "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "37.43131385", "-122.16936535", "edu", ""], ["Jointly Optimize Data Augmentation and Network Training: Adversarial Data Augmentation in Human Pose Estimation", "", "Rutgers University", "Rutgers University", "Rutgers Cook Campus - North, Biel Road, New Brunswick, Middlesex County, New Jersey, 08901, USA", "40.47913175", "-74.43168868", "edu", ""], ["Exploring the Spatial Hierarchy of Mixture Models for Human Pose Estimation", "", "Microsoft Research Asia", "Microsoft Live Labs Research, China", "Microsoft Research Asia", "35.86166000", "104.19539700", "company", ""], ["Detangling People: Individuating Multiple Close People and Their Body Parts via Region Assembly", "", "University of Texas at Austin", "University of Texas at Austin", "University of Texas at Austin, 1, East 23rd Street, The Drag, Austin, Travis County, Texas, 78712, USA", "30.28415100", "-97.73195598", "edu", ""], ["Parsing occluded people by flexible compositions", "University of California, Los Angeles", "University of California, Los Angeles", "University of California, Los Angeles", "200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA", "34.06877880", "-118.44500940", "edu", ""], ["Multi-context Attention for Human Pose Estimation", "", "Tsinghua University", "Tsinghua University", "\u6e05\u534e\u5927\u5b66, 30, \u53cc\u6e05\u8def, \u4e94\u9053\u53e3, \u540e\u516b\u5bb6, \u6d77\u6dc0\u533a, 100084, \u4e2d\u56fd", "40.00229045", "116.32098908", "edu", ""], ["Theory and Practice of Globally Optimal Deformation Estimation", "", "Microsoft Research Asia", "Microsoft Live Labs Research, China", "Microsoft Research Asia", "35.86166000", "104.19539700", "company", ""], ["Learning a sequential search for landmarks", "", "University of Illinois, Urbana-Champaign", "University of Illinois, Urbana-Champaign", "B-3, South Mathews Avenue, Urbana, Champaign County, Illinois, 61801, USA", "40.11116745", "-88.22587665", "edu", ""], ["Human Pose Estimation Using Global and Local Normalization", "", "Chinese Academy of Sciences", "Chinese Academy of Sciences", "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / 
Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "40.00447950", "116.37023800", "edu", ""], ["Realtime Multi-person 2D Pose Estimation Using Part Affinity Fields", "", "Carnegie Mellon University", "Carnegie Mellon University", "Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA", "37.41021930", "-122.05965487", "edu", ""], ["Real-time Human Pose Estimation from Video with Convolutional Neural Networks", "", "Aalto University", "Aalto University", "Aalto, 24, Otakaari, Otaniemi, Suur-Tapiola, Espoo, Helsingin seutukunta, Uusimaa, Etel\u00e4-Suomi, Manner-Suomi, 02150, Suomi", "60.18558755", "24.82427330", "edu", ""], ["MultiPoseNet: Fast Multi-Person Pose Estimation Using Pose Residual Network", "", "Middle East Technical University", "Middle East Technical University", "ODT\u00dc, 1, 1591.sk(315.sk), \u00c7i\u011fdem Mahallesi, Ankara, \u00c7ankaya, Ankara, \u0130\u00e7 Anadolu B\u00f6lgesi, 06800, T\u00fcrkiye", "39.87549675", "32.78553506", "edu", ""], ["Articulated Pose Estimation Using Discriminative Armlet Classifiers", "", "Facebook", "Facebook", "250 Bryant St, Mountain View, CA 94041, USA", "37.39367170", "-122.08072620", "company", "Facebook, Mountain View, CA"], ["Human Context: Modeling Human-Human Interactions for Monocular 3D Pose Estimation", "", "Max Planck Institute for Informatics", "Max Planck Institute for Informatics", "MPII, E1 4, Campus, Universit\u00e4t, Sankt Johann, Bezirk Mitte, Saarbr\u00fccken, Regionalverband Saarbr\u00fccken, Saarland, 66123, Deutschland", "49.25795660", "7.04577417", "edu", ""], ["Human Pose Estimation : Extension and Application", "International Institute of Information Technology", "International Institute of Information Technology", "International Institute of Information Technology", "International Institute of Information Technology, Hyderabad, Campus Road, Ward 105 Gachibowli, Greater Hyderabad Municipal Corporation West Zone, Hyderabad, Rangareddy District, Telangana, 500032, India", "17.44549570", "78.34854698", "edu", ""], ["EXTRACTING THE X FACTOR IN HUMAN PARSING 3 Factored module Factored task Aggregation module Input Main task Shared features Silhouette Body parts The X Factor bottleneck layers bottleneck layers bottleneck layers Initial module bottleneck layers initial block", "", "University of Cambridge", "University of Cambridge", "Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK", "52.17638955", "0.14308882", "edu", ""], ["User qualified ? N Feedback Pass ? Pass ? 
Annotator training Annotating images", "", "University of Edinburgh", "University of Edinburgh", "New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK", "55.94951105", "-3.19534913", "edu", ""], ["Deep Multitask Architecture for Integrated 2D and 3D Human Sensing", "", "Lund University", "Lund University", "TEM at Lund University, 9, Klostergatan, Stadsk\u00e4rnan, Centrum, Lund, Sk\u00e5ne, G\u00f6taland, 22222, Sverige", "55.70395710", "13.19020110", "edu", ""], ["ConvNets with Smooth Adaptive Activation Functions for Regression", "Stony Brook University Hospital", "Stony Brook University Hospital", "Stony Brook University Hospital", "Stony Brook University Hospital, 101, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA", "40.90826665", "-73.11520891", "edu", ""], ["Self Adversarial Training for Human Pose Estimation", "", "National Tsing Hua University", "National Tsing Hua University", "\u570b\u7acb\u6e05\u83ef\u5927\u5b78, 101, \u514b\u606d\u6a4b, \u5149\u660e\u91cc, \u8d64\u571f\u5d0e, \u6771\u5340, \u65b0\u7af9\u5e02, 30013, \u81fa\u7063", "24.79254840", "120.99511830", "edu", ""], ["Joint Attention in Driver-Pedestrian Interaction: from Theory to Practice", "York University, Toronto, ON, Canada", "York University", "York University", "York University, Keele Campus, Campus Walk, North York, Toronto, Ontario, M3J 2S5, Canada", "43.77439110", "-79.50481085", "edu", ""], ["End-to-End Learning of Deformable Mixture of Parts and Deep Convolutional Neural Networks for Human Pose Estimation", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["CRF-CNN: Modeling Structured Information in Human Pose Estimation", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["A Complete View Depended Volleyball Video Dataset under the Uncontrolled Conditions", "", "Islamic Azad University", "Islamic Azad University", "\u062f\u0627\u0646\u0634\u06af\u0627\u0647 \u0622\u0632\u0627\u062f \u0627\u0633\u0644\u0627\u0645\u06cc, \u0647\u0645\u062f\u0627\u0646, \u0628\u062e\u0634 \u0645\u0631\u06a9\u0632\u06cc \u0634\u0647\u0631\u0633\u062a\u0627\u0646 \u0647\u0645\u062f\u0627\u0646, \u0634\u0647\u0631\u0633\u062a\u0627\u0646 \u0647\u0645\u062f\u0627\u0646, \u0627\u0633\u062a\u0627\u0646 \u0647\u0645\u062f\u0627\u0646, \u200f\u0627\u06cc\u0631\u0627\u0646\u200e", "34.84529990", "48.55962120", "edu", ""], ["Real-time Factored ConvNets: Extracting the X Factor in Human Parsing", "", "University of Cambridge", "University of Cambridge", "Clifford Allbutt Lecture Theatre, Robinson Way, Romsey, Cambridge, Cambridgeshire, East of England, England, CB2 0QH, UK", "52.17638955", "0.14308882", "edu", ""], ["Modeling Instance Appearance for Recognition \u2013 Can We Do Better Than EM?", "", "Stanford University", "Stanford University", "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "37.43131385", "-122.16936535", "edu", ""], ["Part-based pose 
estimation with local and non-local contextual information", "", "Nanjing University", "Nanjing University", "NJU, \u4e09\u6c5f\u8def, \u9f13\u697c\u533a, \u5357\u4eac\u5e02, \u6c5f\u82cf\u7701, 210093, \u4e2d\u56fd", "32.05659570", "118.77408833", "edu", ""], ["Inner Space Preserving Generative Pose Machine", "", "Northeastern University", "Northeastern University", "Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA", "42.33836680", "-71.08793524", "edu", ""], ["End-to-end Recovery of Human Shape and Pose", "", "University of California, Berkeley", "University of California, Berkeley", "Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA", "37.86871260", "-122.25586815", "edu", ""], ["Convolutional Pose Machines", "", "Carnegie Mellon University", "Carnegie Mellon University", "Carnegie Mellon University Silicon Valley, South Akron Road, ARC, Santa Clara County, California, 94035-0016, USA", "37.41021930", "-122.05965487", "edu", ""], ["Joint Training of a Convolutional Network and a Graphical Model for Human Pose Estimation", "", "New York University", "New York University", "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "40.72925325", "-73.99625394", "edu", ""], ["Multi-person Pose Estimation with Local Joint-to-Person Associations", "", "University of Bonn", "University of Bonn", "Rheinische Friedrich-Wilhelms-Universit\u00e4t Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk K\u00f6ln, Nordrhein-Westfalen, 53113, Deutschland", "50.73381240", "7.10224650", "edu", ""], ["Fine-grained classification of pedestrians in video: Benchmark and state of the art", "California Institute of Technology", "California Institute of Technology", "California Institute of Technology", "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "34.13710185", "-118.12527487", "edu", ""], ["DeePM: A Deep Part-Based Model for Object Detection and Semantic Part Localization", "", "University of California, Los Angeles", "University of California, Los Angeles", "200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA", "34.06877880", "-118.44500940", "edu", ""], ["Fast Human Pose Estimation", "", "University of Electronic Science and Technology of China", "University of Electronic Science and Technology of China", "Columbus, OH 43210, USA", "40.01419050", "-83.03091430", "edu", ""], ["MoDeep: A Deep Learning Framework Using Motion Features for Human Pose Estimation", "", "New York University", "New York University", "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "40.72925325", "-73.99625394", "edu", ""], ["A Semi-Supervised Data Augmentation Approach using 3D Graphical Engines", "", "Northeastern University", "Northeastern University", "Snell Library, 360, Huntington Avenue, Roxbury Crossing, Fenway, Boston, Suffolk County, Massachusetts, 02115, USA", "42.33836680", "-71.08793524", "edu", ""], ["Deeply Learned Compositional Models for Human Pose Estimation", "", "Northwestern University", "Northwestern University", "Northwestern University, Northwestern Place, Downtown, Evanston, Cook County, Illinois, 60208, USA", "42.05511640", "-87.67581113", "edu", ""], ["Efficient object localization 
using Convolutional Networks", "", "New York University", "New York University", "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "40.72925325", "-73.99625394", "edu", ""], ["Part-Pair Representation for Part Localization", "Columbia University", "Columbia University", "Columbia University", "Columbia University Medical Center, 630, West 168th Street, Washington Heights, Manhattan, Manhattan Community Board 12, New York County, NYC, New York, 10031, USA", "40.84198360", "-73.94368971", "edu", ""], ["Daily Living Activities Recognition via Efficient High and Low Level Cues Combination and Fisher Kernel Representation", "", "University Politehnica of Bucharest", "University Politehnica of Bucharest", "Universitatea Politehnica din Bucure\u0219ti, Novum Invest, Bucure\u0219ti, Militari, Sector 6, Municipiul Bucure\u0219ti, 060042, Rom\u00e2nia", "44.43918115", "26.05044565", "edu", ""], ["Deep Fully-Connected Part-Based Models for Human Pose Estimation", "", "University of Oxford", "University of Oxford", "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "51.75345380", "-1.25400997", "edu", ""], ["Appearance Sharing for Collective Human Pose Estimation", "", "University of Edinburgh", "University of Edinburgh", "New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK", "55.94951105", "-3.19534913", "edu", ""], ["2D-3D Pose Consistency-based Conditional Random Fields for 3D Human Pose Estimation", "", "Seoul National University", "Seoul National University", "\uc11c\uc6b8\ub300\ud559\uad50, \uc11c\ud638\ub3d9\ub85c, \uc11c\ub454\ub3d9, \uad8c\uc120\uad6c, \uc218\uc6d0\uc2dc, \uacbd\uae30, 16614, \ub300\ud55c\ubbfc\uad6d", "37.26728000", "126.98411510", "edu", ""], ["Localization of Humans in Images Using Convolutional Networks", "", "New York University", "New York University", "NYU, West 4th Street, NoHo Historic District, NoHo, Manhattan, Manhattan Community Board 2, New York County, NYC, New York, 10012, USA", "40.72925325", "-73.99625394", "edu", ""], ["Metadata of the chapter that will be visualized in SpringerLink", "", "Stanford University", "Stanford University", "Stanford University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "37.43131385", "-122.16936535", "edu", ""], ["Combining local appearance and holistic view: Dual-Source Deep Neural Networks for human pose estimation", "", "University of South Carolina", "University of South Carolina", "University of South Carolina, Wheat Street, Columbia, Richland County, South Carolina, 29205, USA", "33.99282980", "-81.02685168", "edu", ""], ["Stacked Deformable Part Model with Shape Regression for Object Part Localization", "of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, China", "Chinese Academy of Sciences", "Chinese Academy of Sciences", "\u4e2d\u56fd\u79d1\u5b66\u9662\u5fc3\u7406\u7814\u7a76\u6240, 16, \u6797\u8403\u8def, \u671d\u9633\u533a / Chaoyang, \u5317\u4eac\u5e02, 100101, \u4e2d\u56fd", "40.00447950", "116.37023800", "edu", ""], ["Compact Real-time avoidance on a Humanoid Robot for Human-robot Interaction", "", "Yale University", "Yale University", "Yale University, West Campus Drive, West Haven, New Haven County, Connecticut, 06516, USA", "41.25713055", "-72.98966960", "edu", ""], ["It's all Relative: Monocular 3D Human Pose Estimation from Weakly Supervised Data", "", "California 
Institute of Technology", "California Institute of Technology", "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "34.13710185", "-118.12527487", "edu", ""], ["Pose-Driven Deep Convolutional Model for Person Re-identification", "", "University of Texas at San Antonio", "University of Texas at San Antonio", "UTSA, Paseo Principal, San Antonio, Bexar County, Texas, 78249-1620, USA", "29.58333105", "-98.61944505", "edu", ""], ["Multi-task Recurrent Neural Network for Immediacy Prediction", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["Self-supervised Learning of Geometrically Stable Features Through Probabilistic Introspection", "", "University of Oxford", "University of Oxford", "Radcliffe Camera, Radcliffe Square, Grandpont, Oxford, Oxon, South East, England, OX1 4AJ, UK", "51.75345380", "-1.25400997", "edu", ""], ["Structured Feature Learning for Pose Estimation", "", "Chinese University of Hong Kong", "The Chinese University of Hong Kong", "\u4e2d\u5927 CUHK, NA\u68af New Asia Stairs, \u99ac\u6599\u6c34 Ma Liu Shui, \u4e5d\u809a\u6751 Kau To Village, \u6c99\u7530\u5340 Sha Tin District, \u65b0\u754c New Territories, HK, DD193 1191, \u4e2d\u56fd", "22.42031295", "114.20788644", "edu", ""], ["Beyond Physical Connections: Tree Models in Human Pose Estimation", "", "Nanjing University", "Nanjing University", "NJU, \u4e09\u6c5f\u8def, \u9f13\u697c\u533a, \u5357\u4eac\u5e02, \u6c5f\u82cf\u7701, 210093, \u4e2d\u56fd", "32.05659570", "118.77408833", "edu", ""], ["Human Pose Estimation from Monocular Images: A Comprehensive Survey", "", "Shandong University", "Shandong University", "\u5c71\u4e1c\u5927\u5b66, \u6cf0\u5b89\u8857, \u9ccc\u5c71\u536b\u8857\u9053, \u5373\u58a8\u533a, \u9752\u5c9b\u5e02, \u5c71\u4e1c\u7701, 266200, \u4e2d\u56fd", "36.36934730", "120.67381800", "edu", ""], ["Training object class detectors with click supervision Dim", "", "University of Edinburgh", "University of Edinburgh", "New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK", "55.94951105", "-3.19534913", "edu", ""], ["PoseTrack: A Benchmark for Human Pose Estimation and Tracking", "", "University of Bonn", "University of Bonn", "Rheinische Friedrich-Wilhelms-Universit\u00e4t Bonn, Arkadenhof, Bonn-Zentrum, Stadtbezirk Bonn, Bonn, Regierungsbezirk K\u00f6ln, Nordrhein-Westfalen, 53113, Deutschland", "50.73381240", "7.10224650", "edu", ""], ["DenseReg: Fully Convolutional Dense Shape Regression In-the-Wild", "University College London, UK", "University College London", "University College London", "UCL Institute of Education, 20, Bedford Way, Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK", "51.52316070", "-0.12820370", "edu", ""], ["Shape Models of the Human Body for Distributed Inference", "", "Brown University", "Brown University", "Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA", "41.82686820", "-71.40123146", "edu", ""], ["Lifting from the Deep: Convolutional 3D Pose Estimation from a Single Image", "", "University College London", "University College London", "UCL Institute of Education, 20, Bedford Way, 
Holborn, Bloomsbury, London Borough of Camden, London, Greater London, England, WC1H 0AL, UK", "51.52316070", "-0.12820370", "edu", ""], ["Learning in an Uncertain World: Representing Ambiguity Through Multiple Hypotheses", "", "Technical University Munich", "Technical University Munich", "TUM, 21, Arcisstra\u00dfe, Bezirksteil K\u00f6nigsplatz, Stadtbezirk 03 Maxvorstadt, M\u00fcnchen, Obb, Bayern, 80333, Deutschland", "48.14955455", "11.56775314", "edu", ""], ["From Pictorial Structures to deformable structures", "", "Brown University", "Brown University", "Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA", "41.82686820", "-71.40123146", "edu", ""], ["Human Pose Estimation Using Body Parts Dependent Joint Regressors", "", "ETH Z\u00fcrich", "ETH Z\u00fcrich", "ETH Z\u00fcrich, 101, R\u00e4mistrasse, Hochschulen, Altstadt, Z\u00fcrich, Bezirk Z\u00fcrich, Z\u00fcrich, 8092, Schweiz/Suisse/Svizzera/Svizra", "47.37645340", "8.54770931", "edu", ""], ["Multi-view Pictorial Structures for 3D Human Pose Estimation", "", "Max Planck Institute for Informatics", "Max Planck Institute for Informatics", "MPII, E1 4, Campus, Universit\u00e4t, Sankt Johann, Bezirk Mitte, Saarbr\u00fccken, Regionalverband Saarbr\u00fccken, Saarland, 66123, Deutschland", "49.25795660", "7.04577417", "edu", ""], ["Towards Understanding Action Recognition", "", "Brown University", "Brown University", "Brown University, Waterman Street, College Hill, Providence, Bristol, Rhode Island, 02912, USA", "41.82686820", "-71.40123146", "edu", ""], ["A Rotation Invariant Latent Factor Model for Moveme Discovery from Static Poses", "California Institute of Technology, Pasadena, CA, USA", "California Institute of Technology", "California Institute of Technology", "California Institute of Technology, San Pasqual Walk, Madison Heights, Pasadena, Los Angeles County, California, 91126, USA", "34.13710185", "-118.12527487", "edu", ""], ["Human Pose Estimation via Convolutional Part Heatmap Regression", "", "University of Nottingham", "University of Nottingham", "University of Nottingham, Lenton Abbey, Wollaton, City of Nottingham, East Midlands, England, UK", "52.93874280", "-1.20029569", "edu", ""], ["Learning Feature Pyramids for Human Pose Estimation", "", "University of Sydney", "University of Sydney", "USyd, Fisher Road, Camperdown, Sydney, NSW, 2006, Australia", "-33.88890695", "151.18943366", "edu", ""], ["Input Reconstruction Side and top down view Part Segmentation Input Reconstruction Side and top down view Part Segmentation", "", "University of California, Berkeley", "University of California, Berkeley", "Berkeley Art Museum and Pacific Film Archive, Bancroft Way, Southside, Berkeley, Alameda County, California, 94720-1076, USA", "37.86871260", "-122.25586815", "edu", ""], ["Learning Kinematic Descriptions using SPARE: Simulated and Physical ARticulated Extendable dataset", "", "University of Michigan", "University of Michigan", "University of Michigan, 500, Hayward Street, Ann Arbor, Washtenaw County, Michigan, 48109, USA", "42.29421420", "-83.71003894", "edu", ""], ["Adversarial PoseNet: A Structure-Aware Convolutional Network for Human Pose Estimation", "", "Nanjing University", "Nanjing University", "NJU, \u4e09\u6c5f\u8def, \u9f13\u697c\u533a, \u5357\u4eac\u5e02, \u6c5f\u82cf\u7701, 210093, \u4e2d\u56fd", "32.05659570", "118.77408833", "edu", ""], ["GANerated Hands for Real-time 3D Hand Tracking from Monocular RGB", "", "Stanford University", "Stanford University", "Stanford 
University, Memorial Way, Stanford, Santa Clara County, California, 94305-6015, USA", "37.43131385", "-122.16936535", "edu", ""], ["Training Object Class Detectors with Click Supervision", "", "University of Edinburgh", "University of Edinburgh", "New College, New College Courtyard, The Mound, Old Town, Edinburgh, City of Edinburgh, Scotland, EH1 2LX, UK", "55.94951105", "-3.19534913", "edu", ""], ["Pose-Guided Human Parsing by an AND/OR Graph Using Pose-Context Features", "", "University of California, Los Angeles", "University of California, Los Angeles", "200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA", "34.06877880", "-118.44500940", "edu", ""], ["MONET: Multiview Semi-supervised Keypoint via Epipolar Divergence", "", "University of Minnesota", "University of Minnesota", "WeismanArt, 333, East River Parkway, Marcy-Holmes, Phillips, Minneapolis, Hennepin County, Minnesota, 55455, USA", "44.97308605", "-93.23708813", "edu", ""], ["Thin-Slicing Network: A Deep Structured Model for Pose Estimation in Videos", "", "ETH Zurich", "Computer Vision Laboratory, ETH Zurich, Zurich, Switzerland", "Sternwartstrasse 7, 8092 Z\u00fcrich, Switzerland", "47.37723980", "8.55216180", "edu", ""], ["Dual Path Networks for Multi-Person Human Pose Estimation", "", "University of Missouri", "University of Missouri", "L1, Maguire Boulevard, Lemone Industrial Park, Columbia, Boone County, Missouri, 65201, USA", "38.92676100", "-92.29193783", "edu", ""], ["A Coarse-Fine Network for Keypoint Localization", "", "University of Technology Sydney", "University of Technology Sydney", "University of Technology Sydney, Omnibus Lane, Ultimo, Sydney, NSW, 2007, Australia", "-33.88096510", "151.20107299", "edu", ""], ["Pose-Guided Human Parsing with Deep Learned Features", "", "University of California, Los Angeles", "University of California, Los Angeles", "200 UCLA, Medical Plaza Driveway Suite 540, Los Angeles, CA 90095, USA", "34.06877880", "-118.44500940", "edu", ""], ["Neural Networks with Smooth Adaptive Activation Functions for Regression", "Stony Brook University, NY, USA", "Stony Brook University", "Stony Brook University", "Stony Brook University, 100, Nicolls Road, Stony Brook, Suffolk County, New York, 11794, USA", "40.91531960", "-73.12706260", "edu", ""], ["Marker-Less 3D Human Motion Capture with Monocular Image Sequence and Height-Maps", "", "National University of Singapore", "National University of Singapore", "NUS, Former 1936 British Outpost, Nepal Hill, Clementi, Southwest, 117542, Singapore", "1.29620180", "103.77689944", "edu", ""]]} \ No newline at end of file
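The citation records above are serialized as fixed-order JSON arrays rather than keyed objects. Below is a minimal Python sketch of how such a file could be unpacked; the field names are inferred from the visible values, and the path and key lookup are placeholders rather than the project's actual loader. Note that latitude and longitude are stored as strings, so they need casting before any numeric use.

import json
from collections import namedtuple

# Field names are guesses based on the values visible in the diff above;
# they are illustrative only, not the project's schema.
Citation = namedtuple(
    "Citation",
    "title affiliation institution institution_full address lat lon category note",
)

path = "citations.json"  # placeholder path
with open(path) as f:
    payload = json.load(f)

# Only the tail of the file is visible here, so the name of the key holding
# the rows is unknown; grab the first list-valued entry instead.
rows = next(v for v in payload.values() if isinstance(v, list))
citations = [Citation(*row) for row in rows]

for c in citations[:3]:
    print(c.title, "|", c.institution, "|", float(c.lat), float(c.lon))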
diff --git a/site/datasets/citations/datasets.csv b/site/datasets/citations/datasets.csv
new file mode 100644
index 00000000..70115ede
--- /dev/null
+++ b/site/datasets/citations/datasets.csv
@@ -0,0 +1,241 @@
+key,title,link,images,people,year,citations,influenced
+10k_US_adult_faces,10K US Adult Faces,,"10,168 ","10,168 ",2013,47,10
+3d_rma,3D-RMA,,0,120 ,1999,95,10
+3dddb_unconstrained,3D Dynamic,,0,58 ,2013,999,673
+3dpes,3DPeS,,"1,011 ",192 ,2011,122,8
+4dfab,4DFAB,,"1,835,513 ",180 ,2017,4,1
+50_people_one_question,50 People One Question,,0,,2013,15,3
+a_pascal_yahoo,aPascal,,0,,2009,999,177
+adience,Adience,,"26,580 ","2,284 ",2014,168,56
+afad,AFAD,,"164,432 ",,2017,68,14
+afew_va,AFEW-VA,,"30,000 ",249 ,2017,175,36
+affectnet,AffectNet,,"100,000 ","450,000 ",2017,0,0
+aflw,AFLW,,"25,993 ",,2011,292,99
+afw,AFW,,205 ,,2012,999,319
+agedb,AgeDB,,"16,488 ",568 ,2017,11,0
+alert_airport,ALERT Airport,,"39,902 ","9,651 ",2016,15,2
+am_fed,AM-FED,,0,"168,359 ",2013,73,17
+apis,APiS1.0,,"3,661 ",,2013,26,7
+ar_facedb,AR Face,,"4,000 ",126 ,1998,999,673
+awe_ears,AWE Ears,,"1,000 ",100 ,2016,24,6
+b3d_ac,B3D(AC),,0,,2010,39,5
+bbc_pose,BBC Pose,,0,,2013,25,2
+berkeley_pose,BPAD,,"8,035 ",,2011,221,25
+bfm,BFM,,0,200 ,2009,323,77
+bio_id,BioID Face,,"1,521 ",23 ,2001,498,103
+bjut_3d,BJUT-3D,,0,500 ,2005,2,0
+bosphorus,The Bosphorus,,"3,545 ",81 ,2007,328,70
+bp4d_plus,BP4D+,,"196,000,000 ",140 ,2016,40,5
+bp4d_spontanous,BP4D-Spontaneous,,0,41 ,2013,151,39
+brainwash,Brainwash,,"11,917 ",,2015,57,4
+bu_3dfe,BU-3DFE,,"10,000 ",100 ,2006,555,130
+buhmap_db,BUHMAP-DB ,,0,11 ,2007,25,2
+cafe,CAFE,,"1,192 ",154 ,2015,33,3
+caltech_10k_web_faces,Caltech 10K Web Faces,,"7,092 ",,2003,60,6
+caltech_pedestrians,Caltech Pedestrians,,"250,000 ","2,300 ",2009,999,255
+camel,CAMEL,,0,,2018,19,0
+cas_peal,CAS-PEAL,,"99,594 ","1,040 ",2004,415,83
+casablanca,Casablanca,,"1,466 ",,2015,27,5
+casia_webface,CASIA Webface,,"494,414 ","10,575 ",2014,436,126
+celeba,CelebA,,"202,599 ","10,177 ",2015,808,227
+celeba_plus,CelebFaces+,,"202,599 ","10,177 ",2014,51,5
+cfd,CFD,,0,600 ,2015,83,13
+chalearn,ChaLearn,,0,,,10,1
+chokepoint,ChokePoint,,"64,204 ",54 ,2011,128,33
+cityscapes,Cityscapes,,"25,000 ",,2016,771,216
+clothing_co_parsing,CCP,,"2,098 ",,2014,60,6
+cmdp,CMDP,,583 ,53 ,2014,9,5
+cmu_pie,CMU PIE,,"41,368 ",68 ,2003,742,156
+coco,COCO,,"328,000 ",,2014,283,55
+coco_action,COCO-a,,"10,000 ",,2015,26,2
+coco_qa,COCO QA,,"123,287 ",,2015,191,45
+cofw,COFW,,"1,007 ",,2013,305,115
+cohn_kanade,CK,,486 ,97 ,2000,999,174
+cohn_kanade_plus,CK+,,593 ,123 ,2010,975,235
+columbia_gaze,Columbia Gaze,,"5,880 ",56 ,2013,24,2
+complex_activities,Ongoing Complex Activities,,0,,2016,2,0
+cuhk01,CUHK01,,"1,942 ",971 ,2012,258,92
+cuhk02,CUHK02,,"7,264 ","1,816 ",2013,242,70
+cuhk03,CUHK03,,"13,164 ","1,360 ",2014,512,207
+cvc_01_barcelona,CVC-01,,"6,000 ",,2007,44,5
+czech_news_agency,UFI,,0,"1,135 ",2015,10,3
+d3dfacs,D3DFACS,,"46,710 ",10 ,2011,52,8
+dartmouth_children,Dartmouth Children,,0,80 ,2013,20,3
+data_61,Data61 Pedestrian,,"50,000 ","25,551 ",2008,8,1
+deep_fashion,DeepFashion,,"800,000 ",,,23,4
+disfa,DISFA,,"130,815 ",27 ,2013,8,2
+distance_nighttime,Long Distance Heterogeneous Face,,0,100 ,2012,21,1
+duke_mtmc,Duke MTMC,,"2,000,000 ","2,834 ",2016,136,55
+emotio_net,EmotioNet Database,,"100,000 ",,2016,72,10
+eth_andreas_ess,ETHZ Pedestrian,,"2,293 ",,2007,319,73
+europersons,EuroCity Persons,,"47,300 ",,2018,1,0
+expw,ExpW,,"8,306 ",,2015,9,2
+face_research_lab,Face Research Lab London,,0,,2017,4,0
+face_scrub,FaceScrub,,"106,863 ",530 ,2014,123,20
+face_tracer,FaceTracer,,0,,2008,2,0
+facebook_100,Facebook100,,100 ,100 ,2011,50,18
+faceplace,Face Place,,0,200 ,2012,24,4
+families_in_the_wild,FIW,,"11,193 ","10,676 ",2016,3,0
+fddb,FDDB,,"2,845 ",,2010,1,0
+fei,FEI,,"2,800 ",200 ,2006,0,0
+feret,FERET,,"14,126 ","1,199 ",1996,999,534
+ferplus,FER+,,"35,887 ",,2016,29,7
+fia,CMU FiA,,0,180 ,2010,55,9
+fiw_300,300-W,,600 ,,2013,169,15
+frav3d,FRAV3D,,"1,696 ",106 ,2006,389,21
+frgc,FRGC,,"50,000",688,2004,999,253
+gallagher,Gallagher,,589 ,32 ,2008,6,0
+gavab_db,Gavab,,549 ,61 ,2013,94,11
+geofaces,GeoFaces,,"248,000 ",,,2,0
+georgia_tech_face_database,Georgia Tech Face,,0,50 ,1999,3,0
+h3d,H3D,,0,,2009,707,105
+hda_plus,HDA+,,"2,976 ",53 ,2014,1,0
+helen,Helen,,"2,330 ",,2012,339,119
+hi4d_adsip,Hi4D-ADSIP,,"3,360 ",80 ,2011,5,0
+hipsterwars,Hipsterwars,,"1,893 ",,,91,7
+hollywood_headset,HollywoodHeads,,"224,740 ",,2015,27,5
+hrt_transgender,HRT Transgender,,"10,564 ",38 ,2013,999,170
+ifad,IFAD,,"3,296 ",55 ,2015,2,0
+ifdb,IFDB,,"3,600 ",487 ,2007,0,0
+iit_dehli_ear,IIT Delhi Ear,,465 ,125 ,2011,70,8
+ijb_a,IJB-A,,"24,327 ",500 ,2015,222,80
+ijb_b,IJB-B,,"11,754 ",,2017,25,11
+ijb_c,IJB-C,,"21,294 ","3,531 ",2017,9,3
+ilids_mcts,,,0,119 ,2009,32,3
+ilids_vid_reid,iLIDS-VID,,"43,800 ",300 ,2014,7,0
+images_of_groups,Images of Groups,,"5,080 ",,2009,202,44
+imdb_wiki,IMDB,,"523,051 ","82,612 ",2016,102,28
+imfdb,IMFDB,,"34,512 ",100 ,2013,15,2
+imm_face,IMM Face Dataset,,240 ,40 ,2004,292,99
+immediacy,Immediacy,,"10,000 ",,2015,25,7
+imsitu,imSitu,,"126,102 ",0 ,,48,8
+inria_person,INRIA Pedestrian,,"1,805 ",,2005,999,3574
+jaffe,JAFFE,,213 ,10 ,,848,172
+jiku_mobile,Jiku Mobile Video Dataset,,0,,2013,1,0
+jpl_pose,JPL-Interaction dataset,,0,,2013,148,12
+kdef,KDEF,,"4,900 ",70 ,1998,608,95
+kin_face,UB KinFace,,600 ,400 ,2011,89,12
+kinectface,KinectFaceDB,,0,52 ,2014,75,24
+lag,LAG,,"3,828 ","1,010 ",2017,7,1
+large_scale_person_search,Large Scale Person Search,,"18,184 ","8,432 ",2016,41,9
+leeds_sports_pose,Leeds Sports Pose,,"2,000 ",,2010,278,93
+leeds_sports_pose_extended,Leeds Sports Pose Extended,,"10,000 ",,2011,173,30
+lfw,LFW,/datasets/lfw/,"13,233 ","5,749 ",2007,116,29
+lfw_p,LFWP,,0,,2011,521,196
+m2vts,m2vts,,185 ,37 ,1997,415,83
+m2vtsdb_extended,xm2vtsdb,,"2,360 ",295 ,1999,999,673
+mafl,MAFL,,"20,000 ",,2015,105,36
+malf,MALF,,"5,250 ",,2015,17,5
+mapillary,Mapillary,,"25,000 ",,2017,44,12
+market_1501,Market 1501,,"32,217 ","1,501 ",2015,394,161
+market1203,Market 1203,,"8,569 ","1,203 ",2016,7,0
+mars,MARS,,"1,191,003 ","1,261 ",2016,146,52
+mcgill,McGill Real World,,"18,000 ",60 ,2012,18,2
+megaage,MegaAge,,"41,941 ",,2017,4,2
+megaface,MegaFace,,"4,753,520 ","672,057 ",2016,27,11
+mifs,MIFS,,642 ,214 ,2017,5,0
+mit_cbcl,MIT CBCL,,"2,000 ",10 ,2003,12,1
+miw,MIW,,154 ,125 ,2013,46,8
+mmi_facial_expression,MMI Facial Expression Dataset,,2900?,75 ,2005,440,64
+moments_in_time,Moments in Time,,0,,2018,25,5
+morph,MORPH Commercial,,0,515 ,2006,424,108
+morph_nc,MORPH Non-Commercial,,"55,134 ","13,618 ",2007,424,108
+mpi_large,Large MPI Facial Expression,,0,19 ,2012,28,2
+mpi_small,Small MPI Facial Expression,,"12,595 ",6 ,2005,28,2
+mpii_gaze,MPIIGaze,,"213,659 ",15 ,2015,138,30
+mpii_human_pose,MPII Human Pose,,"1,021,720 ",,2014,356,91
+mrp_drone,MRP Drone,,0,,2014,5,1
+msceleb,MsCeleb,,"1,000,000 ","100,000 ",2016,167,46
+msmt_17,MSMT17,,"126,441 ","4,101 ",2018,14,3
+muct,MUCT,,"3,755 ",276 ,2010,292,99
+mug_faces,MUG Faces,,"1,222 ",26 ,2010,68,16
+multi_pie,MULTIPIE,,"750,000 ",337 ,2008,192,41
+mtfl,MTFL,,"12,995 ",,2014,105,36
+names_and_faces_news,News Dataset,,"30,281 ",,2004,294,29
+nd_2006,ND-2006,,"13,450 ",888 ,2007,32,6
+nova_emotions,Novaemötions Dataset,,"42,911 ",46 ,2013,8,2
+nudedetection,Nude Detection,,180 ,,2009,51,3
+orl,ORL,,400 ,40 ,1994,999,303
+penn_fudan,Penn Fudan,,170 ,,2007,101,15
+peta,PETA,,0,"19,000 ",2014,80,29
+pets,PETS 2017,,"33,264 ",,2017,8,0
+pilot_parliament,PPB,,"1,270 ","1,270 ",2017,11,0
+pipa,PIPA,,"63,188 ","2,356 ",2015,50,20
+pku,PKU,,0,18 ,2015,3,0
+pku_reid,PKU-Reid,,"1,824 ",114 ,2016,7,0
+pornodb,Pornography DB,,"16,727 ",,2013,77,8
+precarious,Precarious,,951 ,,2017,12,1
+prid,PRID,,"24,541 ",934 ,2011,352,116
+prw,PRW,,"34,304 ",932 ,2016,65,10
+psu,PSU,,0,"6,156 ",2012,151,20
+pubfig,PubFig,,"58,797 ",200 ,2009,894,170
+pubfig_83,pubfig83,,"8,300 ",83 ,2011,50,18
+put_face,Put Face,,"9,971 ",100 ,2008,999,673
+qmul_grid,GRID,,"1,275 ","1,025 ",2009,77,15
+qmul_surv_face,QMUL-SurvFace,,"463,507 ","15,573 ",2018,0,0
+rafd,RaFD,,536 ,67 ,2010,446,69
+raid,RAiD,,"6,920 ",43 ,2014,45,6
+rap_pedestrian,RAP,,0,,2016,21,5
+reseed,ReSEED,,"430,000 ",,2014,6,2
+saivt,SAIVT SoftBio,,"64,472 ",152 ,2012,58,15
+sarc3d,Sarc3D,,200 ,50 ,2011,29,1
+scface,SCface,,"4,160 ",130 ,2011,1,0
+scut_fbp,SCUT-FBP,,0,500 ,2015,14,5
+sdu_vid,SDU-VID,,0,300 ,2015,196,77
+sheffield,Sheffield Face,,564 ,20 ,1998,999,547
+social_relation,Social Relation,,"8,306 ",,2015,20,3
+soton,SOTON HiD,,0,100 ,2004,148,19
+sports_videos_in_the_wild,SVW,,0,,2015,6,1
+stair_actions,STAIR Action,,0,,2018,0,0
+stanford_drone,Stanford Drone,,0,,2016,5,0
+stickmen_buffy,Buffy Stickmen,,748 ,,2008,278,93
+stickmen_family,We Are Family Stickmen,,525 ,,2010,77,8
+stickmen_pascal,Stickmen PASCAL,,549 ,,2008,373,89
+sun_attributes,SUN,,"16,873 ",,2010,260,57
+svs,SVS,,0,,2013,26,7
+texas_3dfrd,Texas 3DFRD,,"1,149 ",118 ,2010,88,10
+tiny_images,Tiny Images,,"7,527,697 ",,2008,999,92
+towncenter,TownCenter,,0,,2011,310,61
+tud_brussels,TUD-Brussels,,508 ,,2009,217,25
+tud_campus,TUD-Campus,,71 ,,2008,529,45
+tud_crossing,TUD-Crossing,,201 ,,2008,529,45
+tud_motionpairs,TUD-MotionPairs,,"1,092 ",,2009,217,25
+tud_multiview,TUD-Multiview,,"1,982 ",,2010,302,46
+tud_pedestrian,TUD-Pedestrian,,250 ,,2008,529,45
+tud_stadtmitte,TUD-Stadtmitte,,170 ,,2010,302,46
+tvhi,TVHI,,0,,2010,91,23
+uccs,UCCS,,0,"1,732 ",2018,5,1
+ucf_101,UCF101,,0,,2015,999,354
+ucf_crowd,UCF-CC-50,,50 ,,2013,125,45
+ucf_selfie,UCF Selfie,,"46,836 ",,2015,9,1
+ufdd,UFDD,,"6,424 ",,2018,2,2
+umb,UMB,,"1,473 ",143 ,2011,45,14
+umd_faces,UMD,,"367,888 ","8,501 ",2016,1,0
+unbc_shoulder_pain,UNBC-McMaster Pain,,"48,398 ",29 ,2011,184,58
+urban_tribes,Urban Tribes,,340 ,,2013,17,1
+used,USED Social Event Dataset,,"525,000 ",,2016,7,0
+v47,V47,,752 ,47 ,2011,10,1
+vadana,VADANA,,"2,298 ",43 ,2011,16,4
+vgg_celebs_in_places,,,"38,000 ","4,611 ",2016,5,1
+vgg_faces,VGG Face,,"982,803 ","2,622 ",2015,999,275
+vgg_faces2,VGG Face2,,"3,310,000 ","9,131 ",2018,56,19
+violent_flows,Violent Flows,,0,,2012,83,25
+viper,VIPeR,,"1,264 ",632 ,2007,584,235
+visual_phrases,Phrasal Recognition,,"2,769 ",,2011,233,39
+vmu,VMU,,204 ,51 ,2012,49,13
+voc,VOC,,"11,530 ",,2012,999,222
+vqa,VQA,,"204,721 ",,2017,731,202
+ward,WARD,,"4,786 ",70 ,2012,55,10
+who_goes_there,WGT,,"2,106,468 ",,2016,0,0
+wider,WIDER,,"50,574 ",,2015,45,9
+wider_attribute,,,"13,789 ",,,18,3
+wider_face,WIDER FACE,,"32,203 ",0 ,2016,148,55
+wildtrack,WildTrack,,0,300 ,2018,0,0
+yale_faces,YaleFaces,,165 ,15 ,1997,999,353
+yawdd,YawDD,,0,,2014,14,3
+yfcc_100m,YFCC100M,,"99,171,688 ",,2014,160,23
+york_3d,UOY 3D Face Database,,"5,250 ",350 ,2008,36,1
+youtube_faces,YouTubeFaces,,0,"1,595 ",2011,485,177
+youtube_makeup,YMU,,604 ,151 ,2012,46,8
+youtube_poses,YouTube Pose,,0,,2016,32,5
+wlfdb,WLFDB,,"714,454 ","6,025 ",2014,1,0
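The images and people columns in the new datasets.csv hold display strings ("10,168 " with thousands separators and trailing spaces, 0 or blank when unknown, and the occasional uncertain value such as "2900?"), so any consumer has to normalise them before sorting or aggregating. A rough sketch of that cleaning step, using a helper of my own that is not part of the repo:

import csv

def to_int(value):
    # Parse counts like "10,168 " (thousands separators, stray spaces);
    # blank or uncertain cells such as "2900?" fall back to None.
    value = value.replace(",", "").strip()
    return int(value) if value.isdigit() else None

with open("site/datasets/citations/datasets.csv", newline="") as f:
    rows = list(csv.DictReader(f))

# Example: find the dataset with the most images, skipping unknown counts.
largest = max(
    (r for r in rows if to_int(r["images"])),
    key=lambda r: to_int(r["images"]),
)
print(largest["title"], to_int(largest["images"]))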
diff --git a/site/public/index.html b/site/public/index.html
index d60d50e9..78d02434 100644
--- a/site/public/index.html
+++ b/site/public/index.html
@@ -50,7 +50,7 @@
<section><h2>Facial Recognition Datasets</h2>
<p>Regular Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p>
<h3>Summary</h3>
-</section><section><div class='meta'><div><div class='gray'>Found</div><div>275 datasets</div></div><div><div class='gray'>Created between</div><div>1993-2018</div></div><div><div class='gray'>Smallest dataset</div><div>20 images</div></div><div><div class='gray'>Largest dataset</div><div>10,000,000 images</div></div></div></section><section><div class='meta'><div><div class='gray'>Highest resolution faces</div><div>450x500 (Unconstrained College Students)</div></div><div><div class='gray'>Lowest resolution faces</div><div>16x20 pixels (QMUL SurvFace)</div></div></div></section><section></section>
+</section><section><div class='meta'><div><div class='gray'>Found</div><div>275 datasets</div></div><div><div class='gray'>Created between</div><div>1993-2018</div></div><div><div class='gray'>Smallest dataset</div><div>20 images</div></div><div><div class='gray'>Largest dataset</div><div>10,000,000 images</div></div></div></section><section><div class='meta'><div><div class='gray'>Highest resolution faces</div><div>450x500 (Unconstrained College Students)</div></div><div><div class='gray'>Lowest resolution faces</div><div>16x20 pixels (QMUL SurvFace)</div></div></div></section><section><div class='applet' data-payload='{"command": "load file", "opt": "https://megapixels.nyc3.digitaloceanspaces.com/v1/citations/datasets.csv"}'></div></section>
</div>
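The summary table on the index page is now driven by the applet div above, whose data-payload JSON carries a "load file" command and the published CSV URL in opt. As a hypothetical way to audit which CSVs the generated pages point at (this script is illustrative and not part of the repository), the payloads can be pulled back out of the HTML with the standard library:

import json
from html.parser import HTMLParser

class AppletPayloads(HTMLParser):
    """Collect the JSON payloads from <div class='applet' data-payload='...'>."""
    def __init__(self):
        super().__init__()
        self.payloads = []

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if tag == "div" and attrs.get("class") == "applet" and "data-payload" in attrs:
            self.payloads.append(json.loads(attrs["data-payload"]))

parser = AppletPayloads()
with open("site/public/index.html") as f:
    parser.feed(f.read())

for p in parser.payloads:
    if p.get("command") == "load file":
        print("table source:", p["opt"])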
diff --git a/site/public/test/citations/index.html b/site/public/test/citations/index.html
index 32b9115c..fec2e674 100644
--- a/site/public/test/citations/index.html
+++ b/site/public/test/citations/index.html
@@ -28,6 +28,7 @@
<div class="content">
<section><h1>Citations</h1>
+<h2><a href="/test/">&larr; Back to test index</a></h2>
</section><section><div class='applet' data-payload='{"command": "citations lfw"}'></div></section>
</div>
diff --git a/site/public/test/csv/index.html b/site/public/test/csv/index.html
index a201c94a..0ef3592d 100644
--- a/site/public/test/csv/index.html
+++ b/site/public/test/csv/index.html
@@ -29,7 +29,7 @@
<section><h1>CSV Test</h1>
<h2><a href="/test/">&larr; Back to test index</a></h2>
-</section><section><div class='applet' data-payload='{"command": "load file", "opt": "/datasets/lfw/assets/lfw_names_gender_kg_min.csv", "fields": "Name, Images, Gender, Description"}'></div></section>
+</section><section><div class='applet' data-payload='{"command": "load file", "opt": "https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test//datasets/lfw/assets/lfw_names_gender_kg_min.csv", "fields": "Name, Images, Gender, Description"}'></div></section>
</div>
<footer>
diff --git a/site/public/test/datasets/index.html b/site/public/test/datasets/index.html
index 2874ee2d..976f8ffe 100644
--- a/site/public/test/datasets/index.html
+++ b/site/public/test/datasets/index.html
@@ -28,7 +28,8 @@
<div class="content">
<section><h1>Index of datasets</h1>
-</section><section><div class='applet' data-payload='{"command": "load file", "opt": "https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/https://megapixels.nyc3.digitaloceanspaces.com/v1/datasets/datasets.csv"}'></div></section>
+<h2><a href="/test/">&larr; Back to test index</a></h2>
+</section><section><div class='applet' data-payload='{"command": "load file", "opt": "https://megapixels.nyc3.digitaloceanspaces.com/v1/citations/datasets.csv"}'></div></section>
</div>
<footer>
diff --git a/site/public/test/face_search/index.html b/site/public/test/face_search/index.html
index 3ae3e3f0..c6c49c7f 100644
--- a/site/public/test/face_search/index.html
+++ b/site/public/test/face_search/index.html
@@ -28,6 +28,7 @@
<div class="content">
<section><h1>Face search</h1>
+<h2><a href="/test/">&larr; Back to test index</a></h2>
</section><section><div class='applet' data-payload='{"command": "face_search lfw"}'></div></section>
</div>
diff --git a/site/public/test/gallery/index.html b/site/public/test/gallery/index.html
index fa7ab0ec..841c8225 100644
--- a/site/public/test/gallery/index.html
+++ b/site/public/test/gallery/index.html
@@ -28,6 +28,7 @@
<div class="content">
<section><h1>Gallery test</h1>
+<h2><a href="/test/">&larr; Back to test index</a></h2>
</section><section class='images'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/man.jpg' alt='Modal image 1'><div class='caption'>Modal image 1</div></div>
<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/man.jpg' alt='Modal image 2'><div class='caption'>Modal image 2</div></div>
<div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/man.jpg' alt='Modal image 3'><div class='caption'>Modal image 3</div></div></section>
diff --git a/site/public/test/map/index.html b/site/public/test/map/index.html
index cb8a216e..ae75bcc6 100644
--- a/site/public/test/map/index.html
+++ b/site/public/test/map/index.html
@@ -27,7 +27,9 @@
</header>
<div class="content">
- <section><div class='applet' data-payload='{"command": "map lfw"}'></div></section>
+ <section><h1>Map test</h1>
+<h2><a href="/test/">&larr; Back to test index</a></h2>
+</section><section><div class='applet' data-payload='{"command": "map lfw"}'></div></section>
</div>
<footer>
diff --git a/site/public/test/name_search/index.html b/site/public/test/name_search/index.html
index f67b8723..b8ce1e3a 100644
--- a/site/public/test/name_search/index.html
+++ b/site/public/test/name_search/index.html
@@ -27,7 +27,9 @@
</header>
<div class="content">
- <section><div class='applet' data-payload='{"command": "name_search lfw"}'></div></section>
+ <section><h1>Name search</h1>
+<h2><a href="/test/">&larr; Back to test index</a></h2>
+</section><section><div class='applet' data-payload='{"command": "name_search lfw"}'></div></section>
</div>
<footer>
diff --git a/site/public/test/style/index.html b/site/public/test/style/index.html
index 5e3ac6ea..34eeae53 100644
--- a/site/public/test/style/index.html
+++ b/site/public/test/style/index.html
@@ -28,6 +28,7 @@
<div class="content">
<section><h1>Style Examples</h1>
+<h2><a href="/test/">&larr; Back to test index</a></h2>
</section><section class='wide'><div class='image'><img src='https://nyc3.digitaloceanspaces.com/megapixels/v1/site/test/assets/test.jpg' alt='Style Guide Test'><div class='caption'>Style Guide Test</div></div></section><section><div class='meta'><div><div class='gray'>Date</div><div>17-Jan-2019</div></div><div><div class='gray'>Numbers</div><div>17</div></div><div><div class='gray'>Identities</div><div>12,139</div></div><div><div class='gray'>But also</div><div>This is a test of the stylesheet</div></div></div></section><section><h1>Header 1</h1>
<h2>Header 2</h2>
<h3>Header 3</h3>